diff --git a/.gitignore b/.gitignore
index 99ad9b30..954cd12d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,4 +9,5 @@ venv/
myvenv/
voiceChatMode/model_checkpoints/
voiceChatMode/wav2vec2_model/
-model_checkpoints/
\ No newline at end of file
+model_checkpoints/
+CBT/
diff --git a/Depression/deperession.txt b/Depression/deperession.txt
new file mode 100644
index 00000000..4a794320
--- /dev/null
+++ b/Depression/deperession.txt
@@ -0,0 +1,5 @@
+https://www.verywellmind.com/what-is-moderate-depression-5072794
+
+https://www.healthline.com/health/depression/mild-depression
+
+
diff --git a/ExtraCodes/LearningAgent/LearningAgent.py b/ExtraCodes/LearningAgent/LearningAgent.py
new file mode 100644
index 00000000..d9c41236
--- /dev/null
+++ b/ExtraCodes/LearningAgent/LearningAgent.py
@@ -0,0 +1,400 @@
+"""
+Continuous Learning Agent implementation for the Multi-Agent Depression System.
+
+Responsibilities implemented here (synchronous, on-call):
+ - ingest anonymized interaction history into the vector-backed knowledge base
+ - create / export fine-tune-ready JSONL examples (instruction -> response)
+ - produce a short "update plan" (human-reviewable) describing common failures
+ - apply explicit feedback labels (corrections) to stored examples
+ - lightweight drift / distribution checks to highlight when classifier outputs shift
+
+This file is intended to be imported and called from your app (e.g. when a session ends,
+or when the monitoring/classifier agent returns feedback). It does NOT perform any
+asynchronous scheduling or background training — it prepares artifacts and updates the DB
+so a human or a CI job can trigger heavy ops (fine-tune, full reindex, etc).
+"""
+
+import os
+import re
+import json
+import time
+from typing import List, Dict, Optional, Tuple, Any
+from datetime import datetime
+
+from pydantic import BaseModel
+from pymongo import MongoClient, ASCENDING
+from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+
+import key_param
+
+# Configurable defaults
+DEFAULT_EMBED_MODEL = "text-embedding-3-small"
+DEFAULT_LLM_MODEL = "gpt-3.5-turbo"
+CHUNK_SIZE = 800 # characters per chunk when storing KB
+FT_BUFFER_COLLECTION = "fine_tune_buffer" # collection to hold labeled examples for later export
+
+_EMAIL_RE = re.compile(r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+")
+_PHONE_RE = re.compile(r"(\+?\d[\d\-\s]{6,}\d)")
+
+# -------------------------
+# Data models
+# -------------------------
+class IngestResult(BaseModel):
+ inserted_count: int
+ last_ids: List[Any]
+
+class FeedbackRecord(BaseModel):
+ interaction_id: Any
+ corrected_label: str
+ note: Optional[str] = None
+ timestamp: float = time.time()
+
+# -------------------------
+# Utilities
+# -------------------------
+def _anonymize_text(s: str) -> str:
+ """Remove emails and phone numbers, minimal PII masking.
+ This is intentionally conservative; you may add stronger PII removal
+ (names, addresses) if you have heuristics.
+ """
+ if not s:
+ return s
+ s = _EMAIL_RE.sub("[email_removed]", s)
+ s = _PHONE_RE.sub("[phone_removed]", s)
+ # collapse sequences of whitespace
+ s = re.sub(r"\s+", " ", s).strip()
+ return s
+
+def _chunk_text(s: str, max_len: int = CHUNK_SIZE) -> List[str]:
+ """Simple character-based chunking that tries to break on sentence boundaries."""
+ if not s:
+ return []
+ s = s.strip()
+ if len(s) <= max_len:
+ return [s]
+ sentences = re.split(r'(?<=[\.\?\!])\s+', s)
+ chunks = []
+ cur = ""
+ for sent in sentences:
+ if len(cur) + len(sent) + 1 <= max_len:
+ cur = (cur + " " + sent).strip()
+ else:
+ if cur:
+ chunks.append(cur)
+ if len(sent) <= max_len:
+ cur = sent
+ else:
+ # hard split long sentence
+ for i in range(0, len(sent), max_len):
+ chunks.append(sent[i:i+max_len])
+ cur = ""
+ if cur:
+ chunks.append(cur)
+ return chunks
+
+# -------------------------
+# LearningAgent
+# -------------------------
+class LearningAgent:
+ """
+ Continuous Learning Agent.
+
+ Primary public methods:
+ - ingest_interaction(session_id, conversation_text, meta) -> IngestResult
+ - apply_feedback(interaction_doc_id, corrected_label, note=None)
+ - export_finetune_jsonl(path, limit=None) -> path
+ - propose_updates(sample_size=200) -> dict (human-readable plan)
+ - get_label_distribution(window=1000) -> dict
+ """
+
+ def __init__(
+ self,
+ mongo_uri: str,
+ db_name: str = "mas_db",
+ kb_collection: str = "knowledge_base",
+ index_name: str = "kb_index",
+ embedding_model: str = DEFAULT_EMBED_MODEL,
+ llm_model: str = DEFAULT_LLM_MODEL,
+ ):
+ self.mongo_uri = mongo_uri
+ self.db_name = db_name
+ self.kb_collection = kb_collection
+ self.index_name = index_name
+ self.embedding = OpenAIEmbeddings(openai_api_key=key_param.openai_api_key)
+ self.llm = ChatOpenAI(model=llm_model, openai_api_key=key_param.openai_api_key, temperature=0.0)
+ self.embed_model_name = embedding_model
+
+ # Ensure indexes on Mongo side for efficient queries (idempotent)
+ with MongoClient(self.mongo_uri) as c:
+ db = c[self.db_name]
+ coll = db[self.kb_collection]
+ coll.create_index([("created_at", ASCENDING)])
+ coll.create_index([("metadata.session_id", ASCENDING)])
+
+ # -------------------------
+ # Core ingestion
+ # -------------------------
+ def ingest_interaction(
+ self,
+ session_id: str,
+ conversation_text: str,
+ metadata: Optional[Dict[str, Any]] = None,
+ keep_raw: bool = False,
+ ) -> IngestResult:
+ """
+ Ingest a single session's conversation into the vector KB.
+
+ - anonymizes the text
+ - chunks it
+ - computes embeddings (batch)
+ - upserts documents into self.kb_collection with fields:
+ { content, embedding, metadata, created_at }
+ Returns IngestResult with inserted ids count.
+ """
+ metadata = metadata or {}
+ anonymized = _anonymize_text(conversation_text)
+ chunks = _chunk_text(anonymized, CHUNK_SIZE)
+ if not chunks:
+ return IngestResult(inserted_count=0, last_ids=[])
+
+ # compute embeddings
+ embeddings = self.embedding.embed_documents(chunks)
+
+ docs = []
+ ts = datetime.utcnow()
+ for i, chunk in enumerate(chunks):
+ doc = {
+ "content": chunk,
+ "embedding": embeddings[i],
+ "metadata": {
+ "session_id": session_id,
+ "source": metadata.get("source", "conversation"),
+ "labels": metadata.get("labels", {}), # classifier / monitoring metadata if any
+ "keep_raw": bool(keep_raw),
+ },
+ "created_at": ts,
+ }
+ docs.append(doc)
+
+ client = MongoClient(self.mongo_uri)
+ try:
+ coll = client[self.db_name][self.kb_collection]
+ res = coll.insert_many(docs)
+ return IngestResult(inserted_count=len(res.inserted_ids), last_ids=res.inserted_ids)
+ finally:
+ client.close()
+
+ # -------------------------
+ # Feedback / labeling
+ # -------------------------
+ def apply_feedback(
+ self,
+ doc_id,
+ corrected_label: str,
+ note: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ """
+ Apply explicit feedback to a KB document (e.g. classifier was wrong).
+ Stores feedback in a dedicated collection for traceability and also updates
+ the document's metadata 'labels.corrected'.
+ """
+ client = MongoClient(self.mongo_uri)
+ try:
+ db = client[self.db_name]
+ coll = db[self.kb_collection]
+ fb_coll = db[FT_BUFFER_COLLECTION]
+
+ # update doc metadata
+ upd = coll.find_one_and_update(
+ {"_id": doc_id},
+ {
+ "$set": {"metadata.labels.corrected": corrected_label, "metadata.labels.corrected_at": datetime.utcnow()}
+ },
+ return_document=True,
+ )
+
+ fb = {
+ "interaction_doc_id": doc_id,
+ "corrected_label": corrected_label,
+ "note": note or "",
+ "timestamp": datetime.utcnow(),
+ }
+ fb_id = fb_coll.insert_one(fb).inserted_id
+
+ return {"updated_doc": bool(upd), "feedback_id": fb_id}
+ finally:
+ client.close()
+
+ # -------------------------
+ # Fine-tune exports
+ # -------------------------
+ def export_finetune_jsonl(self, out_path: str, limit: Optional[int] = 1000) -> str:
+ """
+ Export labeled examples suitable for human review / fine-tuning.
+
+ Schema (OpenAI-style JSONL): each line is {"prompt": "...", "completion": " ..."}
+ We build instruction-response pairs from KB items that have 'labels' in metadata.
+
+ Returns the path to the written file.
+ """
+ client = MongoClient(self.mongo_uri)
+ try:
+ coll = client[self.db_name][self.kb_collection]
+ # Prefer items that have corrected labels first, then items with original labels
+ cursor = coll.find(
+ {"$or": [{"metadata.labels.corrected": {"$exists": True}}, {"metadata.labels": {"$exists": True}}]}
+ ).sort("created_at", ASCENDING).limit(limit)
+
+ lines = []
+ for doc in cursor:
+ content = doc.get("content", "")
+ labels = doc.get("metadata", {}).get("labels", {})
+ corrected = labels.get("corrected")
+ original = labels.get("classifier") or labels.get("detected_label") or labels.get("depression_label")
+ label = corrected or original
+ if not label:
+ continue
+ prompt = (
+ "You are a supportive mental-health assistant. Given the anonymized user text below, "
+ "produce a concise empathetic reply that reflects appropriate tone (no diagnosis). "
+ "Also include a suggested 'next action' token in square brackets, one of: [ask_phq], [encourage], [provide_self_help], [escalate].\n\n"
+ "User text:\n"
+ f"{content}\n\n"
+ "Reply:"
+ )
+ completion = f"{label} ||| SuggestedAction: {labels.get('suggested_action','[encourage]')}"
+ lines.append(json.dumps({"prompt": prompt, "completion": completion}))
+
+ # write file
+ os.makedirs(os.path.dirname(out_path) or ".", exist_ok=True)
+ with open(out_path, "w", encoding="utf-8") as f:
+ for l in lines:
+ f.write(l + "\n")
+ return out_path
+ finally:
+ client.close()
+
+ # -------------------------
+ # Simple analysis / proposals
+ # -------------------------
+ def propose_updates(self, sample_size: int = 200) -> Dict[str, Any]:
+ """
+ Produce a short plan (human-readable) describing:
+ - top 3 recurring user concerns (from KB content)
+ - suggested prompt/template changes for the therapy agent
+ - count of labeled corrections awaiting review
+
+ Returns a dict with keys: summary, suggestions, counts
+ """
+ client = MongoClient(self.mongo_uri)
+ try:
+ coll = client[self.db_name][self.kb_collection]
+ # sample recent docs
+ cursor = coll.find({}).sort("created_at", -1).limit(sample_size)
+ texts = []
+ correction_count = 0
+ for d in cursor:
+ texts.append(d.get("content", "")[:1200])
+ if d.get("metadata", {}).get("labels", {}).get("corrected"):
+ correction_count += 1
+ joined = "\n\n".join(texts[: max(1, min(30, len(texts)))])
+
+ # Ask the LLM to summarize recurring themes and give concrete suggestions
+ prompt = (
+ "You are an analyst. Given anonymized conversation snippets, return a STRICT JSON with keys:\n"
+ "- top_themes: list of up to 5 short theme strings\n"
+ "- therapy_agent_prompt_changes: short list of suggested prompt/template edits (1-3)\n"
+ "- high_priority_issues: short list (e.g. 'frequent suicidal ideation mentions')\n\n"
+ "Snippets:\n" + joined
+ )
+ resp = self.llm.invoke([{"role": "user", "content": prompt}])
+ raw = resp.content.strip()
+ parsed = None
+ try:
+ parsed = json.loads(raw)
+ except Exception:
+ # best-effort extract
+ m = re.search(r"\{.*\}", raw, flags=re.DOTALL)
+ if m:
+ try:
+ parsed = json.loads(m.group(0))
+ except Exception:
+ parsed = {"error": "could not parse llm output", "raw": raw[:400]}
+
+ return {
+ "summary": parsed,
+ "counts": {"sample_size": sample_size, "corrections_waiting_review": correction_count},
+ "generated_at": datetime.utcnow().isoformat(),
+ }
+ finally:
+ client.close()
+
+ # -------------------------
+ # Label distribution / drift
+ # -------------------------
+ def get_label_distribution(self, window: int = 1000) -> Dict[str, int]:
+ """
+ Compute distribution of classifier labels in recent KB docs.
+ Expects metadata.labels.classifier OR metadata.labels.detected_label present.
+ """
+ client = MongoClient(self.mongo_uri)
+ try:
+ coll = client[self.db_name][self.kb_collection]
+ cursor = coll.find({"metadata.labels": {"$exists": True}}).sort("created_at", -1).limit(window)
+ dist = {}
+ total = 0
+ for d in cursor:
+ labels = d.get("metadata", {}).get("labels", {})
+ label = labels.get("corrected") or labels.get("classifier") or labels.get("detected_label")
+ if not label:
+ label = "unspecified"
+ dist[label] = dist.get(label, 0) + 1
+ total += 1
+ dist["__total__"] = total
+ return dist
+ finally:
+ client.close()
+
+ # -------------------------
+ # Convenience: quick in-place re-embedding (if model changed)
+ # -------------------------
+ def reembed_collection(self, batch_size: int = 256, resume_after_id: Optional[Any] = None) -> Dict[str, Any]:
+ """
+ Recompute and update embeddings for documents in KB using current embedding model.
+ This is a synchronous operation and may be slow; use with care.
+
+ Returns summary {updated: n, skipped: m}.
+ """
+ client = MongoClient(self.mongo_uri)
+ updated = 0
+ skipped = 0
+ try:
+ coll = client[self.db_name][self.kb_collection]
+ query = {}
+ if resume_after_id is not None:
+ query["_id"] = {"$gt": resume_after_id}
+ cursor = coll.find(query).sort("_id", ASCENDING)
+ to_process = []
+ ids = []
+ for doc in cursor:
+ if "content" not in doc:
+ skipped += 1
+ continue
+ to_process.append(doc["content"])
+ ids.append(doc["_id"])
+ if len(to_process) >= batch_size:
+ embs = self.embedding.embed_documents(to_process)
+ for i, _id in enumerate(ids):
+ coll.update_one({"_id": _id}, {"$set": {"embedding": embs[i], "reembed_at": datetime.utcnow()}})
+ updated += 1
+ to_process = []
+ ids = []
+ # leftover
+ if to_process:
+ embs = self.embedding.embed_documents(to_process)
+ for i, _id in enumerate(ids):
+ coll.update_one({"_id": _id}, {"$set": {"embedding": embs[i], "reembed_at": datetime.utcnow()}})
+ updated += 1
+ return {"updated": updated, "skipped": skipped}
+ finally:
+ client.close()
diff --git a/ExtraCodes/tmp/ft_export.jsonl b/ExtraCodes/tmp/ft_export.jsonl
new file mode 100644
index 00000000..9f45f868
--- /dev/null
+++ b/ExtraCodes/tmp/ft_export.jsonl
@@ -0,0 +1 @@
+{"prompt": "You are a supportive mental-health assistant. Given the anonymized user text below, produce a concise empathetic reply that reflects appropriate tone (no diagnosis). Also include a suggested 'next action' token in square brackets, one of: [ask_phq], [encourage], [provide_self_help], [escalate].\n\nUser text:\nHi, I've been feeling very down lately and have trouble sleeping. Sometimes I don't want to talk to anyone. My email is [email_removed]\n\nReply:", "completion": "No Depression Signs Detected ||| SuggestedAction: [encourage]"}
diff --git a/MonitoringAgent/AgentTool/monitoringTool.py b/MonitoringAgent/AgentTool/monitoringTool.py
new file mode 100644
index 00000000..42a122e4
--- /dev/null
+++ b/MonitoringAgent/AgentTool/monitoringTool.py
@@ -0,0 +1,23 @@
+# MonitoringAgent/AgentTool/monitoringTool.py
+from langchain.tools import BaseTool
+import requests
+import aiohttp
+
+class GetUserSummaryTool(BaseTool):
+ name = "get_user_summary"
+ description = "Fetch the aggregate monitoring summary for a specific user_id from the monitoring FastAPI service."
+
+ def _run(self, user_id: str):
+ url = f"http://localhost:8000/monitor/summary/{user_id}/aggregate"
+ response = requests.get(url)
+ if response.status_code == 200:
+ return response.json()
+ return f"Error {response.status_code}: {response.text}"
+
+ async def _arun(self, user_id: str):
+ url = f"http://localhost:8000/monitor/summary/{user_id}/aggregate"
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url) as resp:
+ if resp.status == 200:
+ return await resp.json()
+ return f"Error {resp.status}: {await resp.text()}"
diff --git a/MonitoringAgent/AgentTool/monitoring_agent.py b/MonitoringAgent/AgentTool/monitoring_agent.py
new file mode 100644
index 00000000..9be461b2
--- /dev/null
+++ b/MonitoringAgent/AgentTool/monitoring_agent.py
@@ -0,0 +1,19 @@
+# MonitoringAgent/AgentTool/monitoring_agent.py
+from langchain.agents import initialize_agent, AgentType
+from langchain.chat_models import ChatOpenAI
+from .monitoringTool import GetUserSummaryTool
+
+def create_monitoring_agent():
+ tool = GetUserSummaryTool()
+ tools = [tool]
+
+    # NOTE: ChatOpenAI reads OPENAI_API_KEY from the environment by default; pass openai_api_key=... to override
+ llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+
+ agent = initialize_agent(
+ tools=tools,
+ llm=llm,
+ agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+ verbose=True
+ )
+ return agent
diff --git a/MonitoringAgent/AgentTool/ochestrator.py b/MonitoringAgent/AgentTool/ochestrator.py
new file mode 100644
index 00000000..5b8ff612
--- /dev/null
+++ b/MonitoringAgent/AgentTool/ochestrator.py
@@ -0,0 +1,11 @@
+# MonitoringAgent/AgentTool/ochestrator.py
+from .monitoring_agent import create_monitoring_agent
+
+def test_monitoring():
+ agent = create_monitoring_agent()
+ query = "Get the monitoring summary for user123"
+ response = agent.run(query)
+ print(response)
+
+if __name__ == "__main__":
+ test_monitoring()
diff --git a/MonitoringAgent/db_handler.py b/MonitoringAgent/db_handler.py
new file mode 100644
index 00000000..1657acd2
--- /dev/null
+++ b/MonitoringAgent/db_handler.py
@@ -0,0 +1,63 @@
+from motor.motor_asyncio import AsyncIOMotorClient
+from datetime import datetime
+import os
+
+MONGO_URL = os.getenv("MONGO_URL", "mongodb://localhost:27017")
+client = AsyncIOMotorClient(MONGO_URL)
+db = client["depression_monitoring"]
+reports_collection = db["reports"]
+
+async def save_report(data: dict):
+ if isinstance(data.get("timestamp"), str):
+ data["timestamp"] = datetime.fromisoformat(data["timestamp"].replace("Z", ""))
+ await reports_collection.insert_one(data)
+
+async def get_reports_by_user(user_id: str):
+ cursor = reports_collection.find({"user_id": user_id}).sort("timestamp", -1)
+ results = []
+ async for doc in cursor:
+ doc["_id"] = str(doc["_id"])
+ results.append(doc)
+ return results
+
+async def get_user_summary(user_id: str):
+ """Compute average depression confidence, last therapy, last assessment date"""
+ pipeline = [
+ {"$match": {"user_id": user_id}},
+ {"$sort": {"timestamp": -1}},
+ {
+ "$group": {
+ "_id": "$user_id",
+ "avg_confidence": {
+ "$avg": {
+ "$cond": [
+ {"$ifNull": ["$data.depression_confidence", False]},
+ "$data.depression_confidence",
+ None
+ ]
+ }
+ },
+ "last_therapy": {
+ "$first": {
+ "$cond": [
+ {"$eq": ["$agent_name", "therapy"]},
+ "$data.therapy_type",
+ None
+ ]
+ }
+ },
+ "last_assessment": {
+ "$first": {
+ "$cond": [
+ {"$eq": ["$agent_name", "assessment"]},
+ "$timestamp",
+ None
+ ]
+ }
+ },
+ "total_reports": {"$sum": 1}
+ }
+ }
+ ]
+ result = await reports_collection.aggregate(pipeline).to_list(length=1)
+ return result[0] if result else None
diff --git a/MonitoringAgent/monitor_api.py b/MonitoringAgent/monitor_api.py
new file mode 100644
index 00000000..79c742e2
--- /dev/null
+++ b/MonitoringAgent/monitor_api.py
@@ -0,0 +1,40 @@
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel, Field
+from datetime import datetime
+from .db_handler import save_report, get_reports_by_user, get_user_summary
+
+router = APIRouter(
+ prefix="/monitor",
+ tags=["Monitoring Agent"]
+)
+
+class Report(BaseModel):
+ agent_name: str
+ user_id: str
+ timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
+ data: dict
+
+@router.post("/report")
+async def receive_report(report: Report):
+ await save_report(report.dict())
+ return {"message": "Report stored in MongoDB"}
+
+@router.get("/summary/{user_id}")
+async def get_user_logs(user_id: str):
+ reports = await get_reports_by_user(user_id)
+ if not reports:
+ raise HTTPException(status_code=404, detail="No reports found for user.")
+ return {"user_id": user_id, "reports": reports}
+
+@router.get("/summary/{user_id}/aggregate")
+async def get_user_aggregate(user_id: str):
+ summary = await get_user_summary(user_id)
+ if not summary:
+ raise HTTPException(status_code=404, detail="No summary data available.")
+ return {
+ "user_id": user_id,
+ "average_depression_confidence": round(summary.get("avg_confidence", 0), 2),
+ "last_therapy": summary.get("last_therapy"),
+ "last_assessment": summary.get("last_assessment"),
+ "total_reports": summary.get("total_reports", 0)
+ }
diff --git a/PHQ9/PHQ9_Student_Depression_Dataset_Updated.xlsx b/PHQ9/PHQ9_Student_Depression_Dataset_Updated.xlsx
new file mode 100644
index 00000000..42d7855d
Binary files /dev/null and b/PHQ9/PHQ9_Student_Depression_Dataset_Updated.xlsx differ
diff --git a/Sinhala/sin.html b/Sinhala/sin.html
new file mode 100644
index 00000000..dd2f351e
--- /dev/null
+++ b/Sinhala/sin.html
@@ -0,0 +1,56 @@
+
+
+
+
+ Sinhala Chat Input
+
+
+
+
+
+ Type in Sinhala (Transliteration)
+
+
+
+
diff --git a/learningAgentTest.py b/learningAgentTest.py
new file mode 100644
index 00000000..ad0aad86
--- /dev/null
+++ b/learningAgentTest.py
@@ -0,0 +1,49 @@
+from LearningAgent.LearningAgent import LearningAgent
+from bson import ObjectId
+import key_param
+
+# Initialize the agent
+agent = LearningAgent(mongo_uri=key_param.MONGO_URI, db_name="learning_test_db")
+
+# ---------- STEP 1: Test ingestion ----------
+text = """
+Hi, I've been feeling very down lately and have trouble sleeping.
+Sometimes I don't want to talk to anyone. My email is user123@gmail.com
+"""
+
+result = agent.ingest_interaction(
+ session_id="test_session_1",
+ conversation_text=text,
+ metadata={
+ "source": "classifier",
+ "labels": {"classifier": "Depression Signs Detected", "confidence": 85}
+ }
+)
+print("✅ Ingestion Result:", result.dict())
+
+# ---------- STEP 2: Verify anonymization ----------
+from pymongo import MongoClient
+
+with MongoClient(key_param.MONGO_URI) as client:
+ doc = client["learning_test_db"]["knowledge_base"].find_one({"metadata.session_id": "test_session_1"})
+ print("\n📄 Stored Document Content:\n", doc["content"])
+ assert "[email_removed]" in doc["content"], "❌ Email was not anonymized!"
+
+# ---------- STEP 3: Apply feedback ----------
+feedback = agent.apply_feedback(
+ doc_id=doc["_id"],
+ corrected_label="No Depression Signs Detected",
+ note="Classifier overestimated sadness"
+)
+print("\n✅ Feedback Result:", feedback)
+
+# ---------- STEP 4: Export fine-tune JSONL ----------
+path = agent.export_finetune_jsonl("tmp/ft_export.jsonl", limit=5)
+print("\n✅ Fine-tune data exported to:", path)
+
+# ---------- STEP 5: Distribution & summary ----------
+dist = agent.get_label_distribution()
+print("\n📊 Label Distribution:", dist)
+
+plan = agent.propose_updates(sample_size=5)
+print("\n🧾 Proposed Update Plan:", plan)
diff --git a/main.py b/main.py
index 9cf99c25..13a2483c 100644
--- a/main.py
+++ b/main.py
@@ -12,10 +12,11 @@
from LevelDetection.router.levelDetection import router as level_detection_router
#from textChatMode.assesmentAgent.routes import router as agent_router
from CountingGame.game import router as game_router
-
+from MonitoringAgent.monitor_api import router as monitor_router
+from therapyAgent.therapyAgent import router as therapy_router
+from monitoringAgentSystem.monotoring_agent_system import router as monitoring_router
app = FastAPI()
-
# Enable CORS
app.add_middleware(
CORSMiddleware,
@@ -32,6 +33,10 @@
app.include_router(level_detection_router)
#app.include_router(agent_router)
app.include_router(game_router)
+app.include_router(monitor_router)
+app.include_router(therapy_router)
+app.include_router(monitoring_router)
+
@app.get("/")
diff --git a/monitoringAgentSystem/monotoring_agent_system.py b/monitoringAgentSystem/monotoring_agent_system.py
new file mode 100644
index 00000000..ff478338
--- /dev/null
+++ b/monitoringAgentSystem/monotoring_agent_system.py
@@ -0,0 +1,90 @@
+# monitoringAgentSystem/monotoring_agent_system.py
+from fastapi import APIRouter
+from pydantic import BaseModel
+from datetime import datetime
+from pymongo import MongoClient
+from langchain_openai import ChatOpenAI
+import key_param
+from fastapi import Query
+from typing import List
+router = APIRouter(prefix="/monitor-agent", tags=["Monitor Agent"])
+
+class AgentActivity(BaseModel):
+ agent_name: str # "chat", "classifier", "therapy"
+ user_id: int
+ session_id: int
+ input_data: dict
+ output_data: dict
+ timestamp: datetime = datetime.utcnow()
+
+class MonitorFeedbackRequest(BaseModel):
+ recent_activities: list[AgentActivity]
+
+@router.post("/track-activity")
+async def track_agent_activity(activity: AgentActivity):
+ """
+ Logs an agent's activity in MongoDB for monitoring.
+ """
+ client = MongoClient(key_param.MONGO_URI)
+ db = client["blissMe"]
+ collection = db["agent_activity_logs"]
+ collection.insert_one(activity.dict())
+ client.close()
+ return {"status": "logged", "agent": activity.agent_name}
+
+
+@router.post("/analyze-feedback")
+async def analyze_feedback(req: MonitorFeedbackRequest):
+ """
+ Analyzes multiple agents’ behaviors and generates feedback using an LLM.
+ """
+ activities_summary = "\n".join([
+ f"{a.timestamp} | {a.agent_name.upper()} | Input: {a.input_data} | Output: {a.output_data}"
+ for a in req.recent_activities
+ ])
+
+ prompt = f"""
+You are a monitoring supervisor analyzing AI agent behavior.
+
+Here are the latest activities of agents:
+{activities_summary}
+
+Your job:
+- Detect inconsistencies or performance issues.
+- Identify agents that might have repeated or conflicting outputs.
+- Give feedback in short, clear bullet points.
+
+Provide feedback for each agent.
+"""
+
+ llm = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key=key_param.openai_api_key)
+ response = llm.invoke([{"role": "user", "content": prompt}])
+
+ return {"feedback": response.content.strip()}
+
+@router.get("/get-session-events")
+async def get_session_events(user_id: int = Query(...), session_id: int = Query(...)):
+ """
+ Retrieve all events for a given user and session.
+ """
+ client = MongoClient(key_param.MONGO_URI)
+ db = client["blissMe"]
+ collection = db["agent_activity_logs"]
+
+ events_cursor = collection.find(
+ {"user_id": user_id, "session_id": session_id}
+ ).sort("timestamp", 1)
+
+ events = []
+ for event in events_cursor:
+ event["_id"] = str(event["_id"])
+ if isinstance(event.get("timestamp"), datetime):
+ event["timestamp"] = event["timestamp"].isoformat()
+ events.append(event)
+
+ client.close()
+ return {
+ "user_id": user_id,
+ "session_id": session_id,
+ "events": events
+ }
diff --git a/textChatMode/chat.py b/textChatMode/chat.py
index 6ded1e92..0770b720 100644
--- a/textChatMode/chat.py
+++ b/textChatMode/chat.py
@@ -8,6 +8,8 @@
from utils.tts import generate_tts_audio
import key_param
from fastapi.responses import FileResponse
+from datetime import datetime
+import requests
router = APIRouter()
@@ -61,6 +63,8 @@ class QueryRequest(BaseModel):
history: str
summaries: list[str] = []
asked_phq_ids: list[int] = []
+ user_id: int
+ session_id: int
@router.post("/ask")
async def ask_question(data: QueryRequest):
@@ -83,55 +87,50 @@ async def ask_question(data: QueryRequest):
context_texts = [doc.page_content[:500] for doc in similar_docs]
summary_text = "\n".join(data.summaries) if data.summaries else "No previous summaries available."
- user_turns = [l for l in data.history.splitlines() if l.lower().startswith(("you:", "user:"))]
- early_stage = len(user_turns) < 3 # start PHQ only after 3 chats
+ unasked_questions = [q for q in PHQ9_QUESTIONS if q["id"] not in data.asked_phq_ids]
+ next_phq_q = unasked_questions[0] if unasked_questions else None
- unasked = [q for q in PHQ9_QUESTIONS if q["id"] not in data.asked_phq_ids]
- next_q = unasked[0] if unasked else None
-
-# Only enable PHQ mode if user has chatted at least 3 times
- phq_mode = False
- if not early_stage and next_q:
- phq_mode = True
-
- # --- Build PHQ instruction if needed ---
+ # Determine if we are in early stage (first 2 turns)
+ user_turns = [line for line in data.history.splitlines() if line.lower().startswith("you:") or line.lower().startswith("user:")]
+ early_stage = len(user_turns) < 3
+
phq_instruction = ""
- if phq_mode:
+ if next_phq_q and not early_stage:
if not data.asked_phq_ids:
- # Before first PHQ question
- phq_instruction = (
- "You may now gently say something like:\n"
- '"To better understand how you’re doing, I’d like to ask a few short questions about how you’ve felt in the past two weeks."\n'
- "Then ask this first question exactly as shown (do NOT paraphrase):\n"
- f'- "{next_q["meaning"]}"\n\n'
- "After the user replies, respond with one short caring line (eg. “Thank you for sharing.” / “I understand, that sounds tough.” / “I understand.”) and move to the next PHQ-9 question in order.\n"
- "Ask only one PHQ question per message.\n"
- "User can reply with: not at all, several days, more than half the days, nearly every day."
- )
+ phq_instruction += f"""
+You may now gently say something like:
+"To better understand how you're doing, I'd like to ask a few short questions on how you feel in past two weeks."
+
+Then ask this question:
+- "{next_phq_q['question']}" (meaning: {next_phq_q['meaning']})
+"""
else:
- # For later PHQ questions
- phq_instruction = (
- "Respond with one short caring line (eg. “Thank you for sharing.” / “I understand, that sounds tough.” / “I understand.”) acknowledging the user’s last answer, then immediately ask the next PHQ-9 question exactly as shown below (do NOT paraphrase):\n"
- f'- "{next_q["meaning"]}"\n\n'
- "Do not add unrelated or extra questions.\n"
- "Ask only one PHQ question per message.\n"
- "User can reply with: not at all, several days, more than half the days, nearly every day."
- )
+ phq_instruction += f"""
+Continue with the next question:
+- "{next_phq_q['question']}" (meaning: {next_phq_q['meaning']})
+"""
+
+ phq_instruction += """
+Make your response short and caring. Don't explain too much. No repetition. Only ask one PHQ-9 question per message.
+Let user respond with:
+- not at all
+- several days
+- more than half the days
+- nearly every day
+"""
chat_prompt = f"""
You are a friendly chatbot who talks like a kind friend.
-- Be warm and caring. Avoid long or repetitive responses. Never say the same supportive line more than once.
-
-- Your job is to gently explore how the user feels and try to understand user by asking questions.
+Be warm and caring. Avoid long or repetitive responses. Never say the same supportive line more than once.
-- NEVER mention PHQ-9 or say "I cannot help you".
+Your job is to gently explore how the user feels and try to understand user by asking questions, and ask PHQ-9 questions naturally when ready.
-- Avoid medical or crisis terms unless directly asked.
+NEVER mention PHQ-9 or say "I cannot help you".
-- Keep your replies short and friendly. One question per message. Once PHQ-9 starts, go through them without pausing.
+Avoid medical or crisis terms unless directly asked.
-- After finishing all 9, continue chatting normally with care and empathy.
+Keep your replies short and friendly. One question per message. Once PHQ-9 starts, go through them without pausing.
Past summaries:
{summary_text}
@@ -140,42 +139,71 @@ async def ask_question(data: QueryRequest):
{context_texts}
Conversation history:
-{history}
-{phq_mode}
{phq_instruction}
-User just said: "{query}"
-
-Now reply like a kind friend:
-"""
-
- bot = ChatOpenAI(
- model="gpt-3.5-turbo",
- openai_api_key=key_param.openai_api_key,
- temperature=0.7
- )
chat_response = bot.invoke([
- {"role": "system", "content": chat_prompt }
+ {"role": "system", "content": chat_prompt}
])
final_text = chat_response.content.strip()
client.close()
- matched_q = next_q if phq_mode else None
- if not unasked: # all 9 done
- matched_q = None
- phq_mode = False
+ matched_q = next_phq_q if not early_stage else None
audio_path = generate_tts_audio(final_text)
+ # ----------------------
+ # PHQ-9 Progress
+ # ----------------------
+ total_phq9 = len(PHQ9_QUESTIONS)
+ answered_phq9 = len(data.asked_phq_ids)
+ phq9_progress = round((answered_phq9 / total_phq9) * 100, 2)
+ phq9_started = bool(data.asked_phq_ids)
+ phq9_completed = not unasked_questions
+
+ # ----------------------
+ # Send activity log to Monitor Agent
+ # ----------------------
+ try:
+ monitor_payload = {
+ "agent_name": "chat",
+ "user_id": data.user_id,
+ "session_id": data.session_id,
+ "input_data": {
+ "user_query": query,
+ "history": history,
+ "summaries": data.summaries,
+ "asked_phq_ids": data.asked_phq_ids
+ },
+ "output_data": {
+ "response": final_text,
+ "phq9_questionID": matched_q["id"] if matched_q else None,
+ "phq9_question": matched_q["question"] if matched_q else None,
+ "phq9_started": phq9_started,
+ "phq9_completed": phq9_completed,
+ "phq9_progress": phq9_progress
+ },
+ "timestamp": datetime.utcnow().isoformat()
+ }
+
+ response = requests.post(
+ "http://localhost:8000/monitor-agent/track-activity",
+ json=monitor_payload,
+ timeout=15
+ )
+ print("✅ Logged chat activity to Monitor Agent:", response)
+ except Exception as e:
+ print("⚠️ Failed to send log to Monitor Agent:", e)
+
return {
"response": final_text,
- "audio_url": f"/voice-audio?path={audio_path}",
+ "audio_url": f"/voice-audio?path={audio_path}",
"phq9_questionID": matched_q["id"] if matched_q else None,
"phq9_question": matched_q["question"] if matched_q else None,
- "lanuage": "English"
+ "phq9_progress": phq9_progress,
+ "language": "English"
}
@router.get("/voice-audio")
diff --git a/therapyAgent/therapyAgent.py b/therapyAgent/therapyAgent.py
new file mode 100644
index 00000000..7996b0bb
--- /dev/null
+++ b/therapyAgent/therapyAgent.py
@@ -0,0 +1,97 @@
+import re
+from datetime import datetime
+
+from fastapi import APIRouter
+from pydantic import BaseModel
+from pymongo import MongoClient
+from langchain_openai import ChatOpenAI
+
+import key_param
+from .utils.therapy_selector import get_therapy_recommendation
+from .utils.history_tracker import save_therapy_history, get_user_therapy_history
+
+router = APIRouter(prefix="/therapy-agent", tags=["Therapy Agent"])
+
+class TherapyRequest(BaseModel):
+    """Request body for POST /therapy-agent/chat."""
+    # Latest free-text message from the user.
+    user_query: str
+    # Severity bucket used to filter therapies; presumably one of
+    # "Minimal" / "Moderate" / "Severe" (matches therapy_selector) — TODO confirm.
+    depression_level: str
+    user_id: str
+    session_id: str
+    # Optional summaries of earlier sessions; defaults to empty. (Pydantic
+    # deep-copies field defaults, so the mutable [] default is safe here.)
+    session_summaries: list[str] = []
+
+@router.post("/chat")
+async def therapy_chat(data: TherapyRequest):
+ """
+ Therapy Agent main route:
+ Handles chat, suggests therapies, tracks user progress.
+ """
+
+ client = MongoClient(key_param.MONGO_URI)
+ db = client["blissMe"]
+
+ # Fetch therapy history
+ history_records = get_user_therapy_history(db, data.user_id)
+ recent_history = "\n".join(
+ [f"{h['therapy_name']} on {h['date']} (duration {h['duration']} mins)" for h in history_records]
+ ) if history_records else "No prior therapies found."
+
+ # Suggest new therapy
+ therapy_suggestion = get_therapy_recommendation(db, data.depression_level, history_records)
+
+ # Base prompt
+ prompt = f"""
+You are a friendly therapy assistant designed to support users with {data.depression_level} depression.
+You talk like a warm and caring friend. don't always suggest therapies, suggestwhen appropriate based on the user's emotional state other times KEEP CHATTING as caring friend BUT your main duty is suggesting therapies.
+
+Current user history:
+{recent_history}
+
+If the user has moderate or minimal depression, suggest small helpful activities or therapies from the system.
+Therapies can include relaxation breathing, mindfulness, journaling, or gratitude reflection.
+don't use log sentences. keep it short and simple.
+don't mention about depression level or depression to the user.
+
+
+If a therapy matches one from the system, gently ask:
+"Would you like to start the {therapy_suggestion['name']} therapy now?"
+
+If the user agrees, return:
+ACTION:START_THERAPY:{therapy_suggestion['id']}
+
+Otherwise, continue gentle conversation and emotional support.
+
+User message: "{data.user_query}"
+"""
+
+ bot = ChatOpenAI(model="gpt-3.5-turbo", openai_api_key=key_param.openai_api_key)
+ response = bot.invoke([{"role": "user", "content": prompt}])
+
+ reply_text = response.content.strip()
+ action_detected = None
+ is_therapy_suggested = False
+
+
+ if f"start the {therapy_suggestion['name']} therapy" in reply_text.lower():
+ is_therapy_suggested = True
+
+
+ if "ACTION:START_THERAPY" in reply_text:
+ action_detected = reply_text.split("ACTION:START_THERAPY:")[-1].strip()
+ save_therapy_history(
+ db,
+ data.user_id,
+ data.session_id,
+ therapy_suggestion["name"],
+ therapy_suggestion["id"]
+ )
+ is_therapy_suggested = False
+
+
+ client.close()
+
+ return {
+ "response": reply_text.replace("ACTION:START_THERAPY", "").strip(),
+ "action": "START_THERAPY" if action_detected else None,
+ "therapy_id": action_detected,
+ "therapy_name": therapy_suggestion["name"] if action_detected else None,
+ "therapy_path": therapy_suggestion.get("path"),
+ "isTherapySuggested": is_therapy_suggested,
+}
+
diff --git a/therapyAgent/utils/history_tracker.py b/therapyAgent/utils/history_tracker.py
new file mode 100644
index 00000000..1313ddc2
--- /dev/null
+++ b/therapyAgent/utils/history_tracker.py
@@ -0,0 +1,18 @@
+from datetime import datetime
+
+def save_therapy_history(db, user_id, session_id, therapy_name, therapy_id):
+ history_collection = db["TherapyHistory"]
+ record = {
+ "user_id": user_id,
+ "session_id": session_id,
+ "therapy_id": therapy_id,
+ "therapy_name": therapy_name,
+ "date": datetime.utcnow(),
+ "duration": None,
+ "feedback": None
+ }
+ history_collection.insert_one(record)
+
+def get_user_therapy_history(db, user_id):
+ history_collection = db["TherapyHistory"]
+ return list(history_collection.find({"user_id": user_id}).sort("date", -1))
diff --git a/therapyAgent/utils/seedTherapies.js b/therapyAgent/utils/seedTherapies.js
new file mode 100644
index 00000000..48bda7dc
--- /dev/null
+++ b/therapyAgent/utils/seedTherapies.js
@@ -0,0 +1,120 @@
+
+
+// Seed catalogue inserted by seedTherapies(): one entry per therapy module.
+// applicableLevel buckets a therapy by depression severity
+// ("Minimal" / "Moderate" / "Severe"); path points at the front-end
+// component implementing the therapy; durationMinutes is the expected
+// session length.
+// NOTE(review): this list has drifted from default_therapies in
+// therapy_selector.py (T008 and all paths differ) — confirm which one is
+// the source of truth.
+const defaultTherapies = [
+  {
+    therapyID: "T001",
+    name: "Anxiety_Games",
+    applicableLevel: "Moderate",
+    description:
+      "A fun game-based therapy to reduce anxiety levels through interactive relaxation challenges.",
+    path: "/therapy/Game/Anxiety_Games.tsx",
+    durationMinutes: 15,
+  },
+  {
+    therapyID: "T002",
+    name: "breathing-game",
+    applicableLevel: "Moderate",
+    description:
+      "A breathing control game designed to synchronize breathing patterns with calming visuals.",
+    path: "/therapy/breathing-game.tsx",
+    durationMinutes: 10,
+  },
+  {
+    therapyID: "T003",
+    name: "BreathingExercise",
+    applicableLevel: "Minimal",
+    description:
+      "Simple guided breathing exercises to ease mild anxiety and enhance focus.",
+    path: "/therapy/BreathingExercise.tsx",
+    durationMinutes: 8,
+  },
+  {
+    therapyID: "T004",
+    name: "forest-game",
+    applicableLevel: "Moderate",
+    description:
+      "Nature-themed mindfulness therapy to immerse the user in calming virtual forest experiences.",
+    path: "/therapy/forest-game.tsx",
+    durationMinutes: 12,
+  },
+  {
+    therapyID: "T005",
+    name: "LogMood",
+    applicableLevel: "Minimal",
+    description:
+      "A daily mood logging therapy that helps users track emotions and patterns for better awareness.",
+    path: "/therapy/LogMood.tsx",
+    durationMinutes: 5,
+  },
+  {
+    therapyID: "T006",
+    name: "MeditationPlayer",
+    applicableLevel: "Moderate",
+    description:
+      "A meditation player that provides guided sessions for stress management and inner peace.",
+    path: "/therapy/MeditationPlayer.tsx",
+    durationMinutes: 15,
+  },
+  {
+    therapyID: "T007",
+    name: "MoodTracker",
+    applicableLevel: "Minimal",
+    description:
+      "An interactive mood tracking module to monitor emotional states over time.",
+    path: "/therapy/MoodTracker.tsx",
+    durationMinutes: 7,
+  },
+  {
+    therapyID: "T008",
+    name: "MoodTrackerMain",
+    applicableLevel: "Minimal",
+    description:
+      "Main dashboard for mood tracking and visualization of emotional trends.",
+    path: "/therapy/MoodTrackerMain.tsx",
+    durationMinutes: 10,
+  },
+  {
+    therapyID: "T009",
+    name: "ocean-waves",
+    applicableLevel: "Moderate",
+    description:
+      "Audio-visual therapy simulating ocean waves for deep relaxation and mindfulness.",
+    path: "/therapy/ocean-waves.tsx",
+    durationMinutes: 10,
+  },
+  {
+    therapyID: "T010",
+    name: "zen-garden",
+    applicableLevel: "Severe",
+    description:
+      "A virtual Zen garden experience for reflection, focus, and cognitive grounding.",
+    path: "/therapy/zen-garden.tsx",
+    durationMinutes: 20,
+  },
+];
+
+// One-shot seeding script: wipes the Therapy collection and re-inserts the
+// defaultTherapies catalogue, then exits (0 on success, 1 on failure).
+async function seedTherapies() {
+  try {
+    // NOTE(review): `mongoose`, `MONGO_URI` and `Therapy` are never
+    // required/defined anywhere in this file — as written the script throws
+    // a ReferenceError at startup. Add `const mongoose = require("mongoose")`,
+    // the MONGO_URI constant, and the Therapy model import.
+    await mongoose.connect(MONGO_URI, {
+      // NOTE(review): both options are deprecated no-ops since mongoose 6;
+      // safe to drop once the mongoose version is confirmed.
+      useNewUrlParser: true,
+      useUnifiedTopology: true,
+    });
+
+    console.log(" Connected to MongoDB");
+
+    // Clear existing therapies before seeding
+    await Therapy.deleteMany({});
+    console.log(" Cleared existing therapies");
+
+    // Insert new ones
+    await Therapy.insertMany(defaultTherapies);
+    console.log(" Default therapies inserted successfully!");
+
+    process.exit(0);
+  } catch (error) {
+    console.error(" Error seeding therapies:", error);
+    process.exit(1);
+  }
+}
+
+// Run immediately when the file is executed with `node seedTherapies.js`.
+seedTherapies();
diff --git a/therapyAgent/utils/therapy_selector.py b/therapyAgent/utils/therapy_selector.py
new file mode 100644
index 00000000..ccba58e4
--- /dev/null
+++ b/therapyAgent/utils/therapy_selector.py
@@ -0,0 +1,113 @@
+import random
+
+# In-memory therapy catalogue used by get_therapy_recommendation().
+# applicableLevel buckets a therapy by depression severity
+# ("Minimal" / "Moderate" / "Severe"); path is the front-end route opened
+# when the therapy starts.
+# NOTE(review): this list has drifted from seedTherapies.js (T008 and all
+# paths differ, and some paths here lack a leading "/") — confirm which
+# catalogue is authoritative.
+default_therapies = [
+    {
+        "therapyID": "T001",
+        "name": "Anxiety_Games",
+        "applicableLevel": "Moderate",
+        "description": "A fun game-based therapy to reduce anxiety levels through interactive relaxation challenges.",
+        "path": "/dash/anxiety",
+        "durationMinutes": 15,
+    },
+    {
+        "therapyID": "T002",
+        "name": "breathing-game",
+        "applicableLevel": "Moderate",
+        "description": "A breathing control game designed to synchronize breathing patterns with calming visuals.",
+        "path": "/dash/anxiety",
+        "durationMinutes": 10,
+    },
+    {
+        "therapyID": "T003",
+        "name": "BreathingExercise",
+        "applicableLevel": "Minimal",
+        "description": "Simple guided breathing exercises to ease mild anxiety and enhance focus.",
+        "path": "therapy/breathing",
+        "durationMinutes": 8,
+    },
+    {
+        "therapyID": "T004",
+        "name": "forest-game",
+        "applicableLevel": "Moderate",
+        "description": "Nature-themed mindfulness therapy to immerse the user in calming virtual forest experiences.",
+        "path": "/dash/anxiety",
+        "durationMinutes": 12,
+    },
+    {
+        "therapyID": "T005",
+        "name": "LogMood",
+        "applicableLevel": "Minimal",
+        "description": "A daily mood logging therapy that helps users track emotions and patterns for better awareness.",
+        "path": "/therapy/mood-tracker-home",
+        "durationMinutes": 5,
+    },
+    {
+        "therapyID": "T006",
+        "name": "MeditationPlayer",
+        "applicableLevel": "Moderate",
+        "description": "A meditation player that provides guided sessions for stress management and inner peace.",
+        "path": "therapy/medication",
+        "durationMinutes": 15,
+    },
+    {
+        "therapyID": "T007",
+        "name": "MoodTracker",
+        "applicableLevel": "Minimal",
+        "description": "An interactive mood tracking module to monitor emotional states over time.",
+        "path": "/therapy/mood-tracker-home",
+        "durationMinutes": 7,
+    },
+    {
+        "therapyID": "T008",
+        "name": "Number Guessing Game",
+        "applicableLevel": "Minimal",
+        "description": "A fun and engaging number guessing game to distract and entertain users.",
+        "path": "game/therapy_game",
+        "durationMinutes": 10,
+    },
+    {
+        "therapyID": "T009",
+        "name": "ocean-waves",
+        "applicableLevel": "Moderate",
+        "description": "Audio-visual therapy simulating ocean waves for deep relaxation and mindfulness.",
+        "path": "/dash/anxiety",
+        "durationMinutes": 10,
+    },
+    {
+        "therapyID": "T010",
+        "name": "zen-garden",
+        "applicableLevel": "Severe",
+        "description": "A virtual Zen garden experience for reflection, focus, and cognitive grounding.",
+        "path": "/dash/anxiety",
+        "durationMinutes": 20,
+    },
+]
+
+
+def get_therapy_recommendation(db, depression_level, history_records):
+ """
+ Select therapy from default list based on depression level and past usage.
+ """
+ # Filter by applicable level
+ matching = [
+ t for t in default_therapies
+ if t["applicableLevel"].lower() in [depression_level.lower(), "general"]
+ ]
+
+ # If none match, fallback to all
+ if not matching:
+ matching = default_therapies
+
+ # Avoid recently used therapies
+ used_ids = [h["therapy_id"] for h in history_records]
+ available = [t for t in matching if t["therapyID"] not in used_ids]
+
+ # Pick a random one
+ selected = random.choice(available or matching)
+
+ return {
+ "id": selected["therapyID"],
+ "name": selected["name"],
+ "description": selected["description"],
+ "path": selected["path"]
+ }