3 changes: 3 additions & 0 deletions .gitignore
@@ -146,3 +146,6 @@ windsurf.md
.codeium/
.tabnine/
.kite/
.claude/
.mcp.json
AGENTS.md
9 changes: 9 additions & 0 deletions backend/lcfs/settings.py
@@ -93,6 +93,10 @@ class Settings(BaseSettings):
ches_client_secret: str = ""
ches_sender_email: str = "noreply@gov.bc.ca"
ches_sender_name: str = "LCFS Notification System"
ches_support_email: str = "lcfs@gov.bc.ca"

# RAG service URL for the LCFS Assistant chat
rag_service_url: str = "http://localhost:1416"

def __init__(self, **kwargs):
# Map APP_ENVIRONMENT to environment if present
@@ -152,6 +156,11 @@ def redis_url(self) -> URL:
path=path,
)

# Chat service settings
OLLAMA_URL: str = "http://ollama:11434"
RAG_SERVICE_URL: str = "http://rag-llm:1416"
CHAT_RAG_ENABLED: bool = True

class Config:
env_file = ".env"
env_prefix = "LCFS_"
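Since `Settings` loads from `.env` with `env_prefix = "LCFS_"`, the new fields can be overridden via environment variables. A hypothetical `.env` snippet (the variable names follow pydantic's prefix convention; the values simply mirror the defaults in this diff):

LCFS_RAG_SERVICE_URL=http://rag-llm:1416
LCFS_OLLAMA_URL=http://ollama:11434
LCFS_CHAT_RAG_ENABLED=true
LCFS_CHES_SUPPORT_EMAIL=lcfs@gov.bc.ca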
5 changes: 5 additions & 0 deletions backend/lcfs/web/api/chat/__init__.py
@@ -0,0 +1,5 @@
"""Chat API with OpenAI compatibility."""

from lcfs.web.api.chat.views import router

__all__ = ["router"]
128 changes: 128 additions & 0 deletions backend/lcfs/web/api/chat/schemas.py
@@ -0,0 +1,128 @@
"""OpenAI-compatible chat schemas."""

from typing import List, Optional, Literal
from pydantic import BaseModel, Field
import time


class ChatMessage(BaseModel):
"""A chat message in OpenAI format."""

role: Literal["user", "assistant", "system"]
content: str
name: Optional[str] = None


class ChatCompletionRequest(BaseModel):
"""OpenAI chat completion request format."""

messages: List[ChatMessage]
model: str = "lcfs-rag"
temperature: Optional[float] = Field(default=0.7, ge=0.0, le=2.0)
max_tokens: Optional[int] = Field(default=500, gt=0, le=2000)
stream: Optional[bool] = False
top_p: Optional[float] = Field(default=1.0, ge=0.0, le=1.0)
frequency_penalty: Optional[float] = Field(default=0.0, ge=-2.0, le=2.0)
presence_penalty: Optional[float] = Field(default=0.0, ge=-2.0, le=2.0)
stop: Optional[List[str]] = None
user: Optional[str] = None


class Usage(BaseModel):
"""Token usage information."""

prompt_tokens: int
completion_tokens: int
total_tokens: int


class ChatCompletionChoice(BaseModel):
"""A chat completion choice."""

index: int
message: ChatMessage
finish_reason: Optional[Literal["stop", "length", "content_filter"]] = None


class ChatCompletionResponse(BaseModel):
"""OpenAI chat completion response format."""

id: str
object: str = "chat.completion"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: List[ChatCompletionChoice]
usage: Optional[Usage] = None


# Streaming schemas
class ChatCompletionChunkDelta(BaseModel):
"""Delta object for streaming responses."""

role: Optional[Literal["assistant"]] = None
content: Optional[str] = None
metadata: Optional[dict] = None


class ChatCompletionChunkChoice(BaseModel):
"""A streaming chat completion choice."""

index: int
delta: ChatCompletionChunkDelta
finish_reason: Optional[Literal["stop", "length", "content_filter"]] = None


class ChatCompletionChunk(BaseModel):
"""OpenAI chat completion chunk for streaming."""

id: str
object: str = "chat.completion.chunk"
created: int = Field(default_factory=lambda: int(time.time()))
model: str
choices: List[ChatCompletionChunkChoice]


class ErrorDetail(BaseModel):
"""Error detail object."""

message: str
type: str
param: Optional[str] = None
code: Optional[str] = None


class ErrorResponse(BaseModel):
"""OpenAI-compatible error response."""

error: ErrorDetail


class EscalationRequest(BaseModel):
"""Support escalation request from the chat assistant."""

issue_type: str = Field(
...,
description="Type of issue: question, issue, feedback",
)
description: str = Field(..., description="User's description of their issue")
user_email: str = Field(..., description="User's email for response")
user_name: str = Field(..., description="User's name")
organization_name: Optional[str] = Field(
None, description="User's organization name"
)
organization_id: Optional[int] = Field(None, description="User's organization ID")
conversation_history: Optional[str] = Field(
None, description="Full conversation history with the assistant"
)
is_low_confidence: bool = Field(
False, description="Whether this escalation was triggered by low AI confidence"
)
submitted_at: str = Field(..., description="Timestamp of submission")


class EscalationResponse(BaseModel):
"""Response after submitting an escalation request."""

status: str
message: str
ticket_id: Optional[str] = None
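A minimal sketch of how a caller might build a request against these models (the names come from the schemas above; the final `.dict()` call mirrors the pydantic v1 style used in `services.py`):

from lcfs.web.api.chat.schemas import ChatCompletionRequest, ChatMessage

request = ChatCompletionRequest(
    messages=[
        ChatMessage(role="system", content="You are the LCFS Assistant."),
        ChatMessage(role="user", content="How do I submit a compliance report?"),
    ],
    temperature=0.2,
)
# Field constraints declared above are enforced at construction time,
# e.g. a temperature outside [0.0, 2.0] raises a ValidationError.
payload = request.dict(exclude_none=True)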
42 changes: 42 additions & 0 deletions backend/lcfs/web/api/chat/services.py
@@ -0,0 +1,42 @@
"""Simplified chat service that forwards requests to RAG pipeline."""

from typing import Dict, Any
import httpx
import structlog

from lcfs.web.api.chat.schemas import ChatCompletionRequest
from lcfs.db.models.user import UserProfile
from lcfs.settings import settings

logger = structlog.get_logger(__name__)


class ChatService:
"""Simplified service that forwards chat requests to RAG pipeline."""

def __init__(self):
self.rag_service_url = settings.rag_service_url

async def create_completion(
self, request: ChatCompletionRequest, user: UserProfile
) -> Dict[str, Any]:
"""Forward chat completion request to the RAG service and return JSON response."""
messages = [msg.dict(exclude_none=True) for msg in request.messages]

try:
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(
f"{self.rag_service_url}/lcfs_rag/run",
json={"messages": messages},
)
response.raise_for_status()
rag_result = response.json()

return rag_result.get("result") or rag_result
except Exception as exc:
logger.error(
"chat_completion_error",
error=str(exc),
error_type=type(exc).__name__,
)
raise
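The service is a thin proxy: it strips `None` fields from each message, POSTs the list to the pipeline's `/lcfs_rag/run` endpoint, and unwraps a top-level `"result"` key when present. A usage sketch under those assumptions (the `user` argument would be a real `UserProfile` in the app; per the code above it is accepted but not forwarded):

from lcfs.web.api.chat.schemas import ChatCompletionRequest, ChatMessage
from lcfs.web.api.chat.services import ChatService

async def demo(user):
    service = ChatService()  # reads settings.rag_service_url
    request = ChatCompletionRequest(
        messages=[ChatMessage(role="user", content="What fuels qualify under LCFS?")]
    )
    # Forwards {"messages": [...]} to {rag_service_url}/lcfs_rag/run and
    # returns rag_result["result"] if the pipeline wraps its answer.
    return await service.create_completion(request, user)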
187 changes: 187 additions & 0 deletions backend/lcfs/web/api/chat/views.py
@@ -0,0 +1,187 @@
"""Chat API endpoints with OpenAI compatibility."""

import uuid
from datetime import datetime
from typing import Any, Dict
from fastapi import APIRouter, Depends, HTTPException, Request
import structlog

from lcfs.web.api.chat.schemas import (
ChatCompletionRequest,
ErrorResponse,
EscalationRequest,
EscalationResponse,
)
from lcfs.web.api.chat.services import ChatService
from lcfs.web.api.email.services import CHESEmailService
from lcfs.db.models.user import UserProfile
from lcfs.web.core.decorators import view_handler
from lcfs.db.base import get_current_user
from lcfs.settings import settings

router = APIRouter()
logger = structlog.get_logger(__name__)


@router.post(
"/completions",
responses={
400: {"model": ErrorResponse},
500: {"model": ErrorResponse},
503: {"model": ErrorResponse},
},
)
@view_handler(["*"])
async def chat_completions(
request: Request,
chat_request: ChatCompletionRequest,
current_user: UserProfile = Depends(get_current_user),
) -> Dict[str, Any]:
"""
Create a chat completion using the RAG service and return
an OpenAI-compatible JSON response.

Args:
request: FastAPI Request object
chat_request: Chat completion request in OpenAI format
current_user: Current authenticated user

Returns:
Chat completion JSON response
"""
if not chat_request.messages:
raise HTTPException(
status_code=400, detail="messages field is required and cannot be empty"
)

# Validate messages
for i, message in enumerate(chat_request.messages):
if not message.content.strip():
raise HTTPException(
status_code=400, detail=f"Message at index {i} has empty content"
)

chat_service = ChatService()

result = await chat_service.create_completion(chat_request, current_user)
return result


@router.post(
"/escalate",
response_model=EscalationResponse,
responses={
400: {"model": ErrorResponse},
500: {"model": ErrorResponse},
},
)
@view_handler(["*"])
async def escalate_to_support(
request: Request,
escalation_request: EscalationRequest,
current_user: UserProfile = Depends(get_current_user),
email_service: CHESEmailService = Depends(),
) -> EscalationResponse:
"""
Escalate a chat conversation to support.

Sends the conversation history and user's issue to the support team.
"""
# Generate a ticket ID for tracking
ticket_id = (
f"LCFS-{datetime.now().strftime('%Y%m%d')}-{uuid.uuid4().hex[:6].upper()}"
)

# Format the issue type for display
issue_type_labels = {
"question": "General Question",
"issue": "Report an Issue",
"feedback": "Feedback",
}
issue_type_display = issue_type_labels.get(
escalation_request.issue_type, escalation_request.issue_type
)

# Build the email body
email_body = f"""
<h2>LCFS Assistant Support Request</h2>

<p><strong>Ticket ID:</strong> {ticket_id}</p>
<p><strong>Submitted:</strong> {escalation_request.submitted_at}</p>
<p><strong>Low Confidence Escalation:</strong> {"Yes" if escalation_request.is_low_confidence else "No"}</p>

<hr>

<h3>User Information</h3>
<p><strong>Name:</strong> {escalation_request.user_name}</p>
<p><strong>Email:</strong> {escalation_request.user_email}</p>
<p><strong>Organization:</strong> {escalation_request.organization_name or "N/A"}</p>
<p><strong>Organization ID:</strong> {escalation_request.organization_id or "N/A"}</p>

<hr>

<h3>Issue Details</h3>
<p><strong>Issue Type:</strong> {issue_type_display}</p>
<p><strong>Description:</strong></p>
<p>{escalation_request.description}</p>

<hr>

<h3>Conversation History</h3>
<pre style="background-color: #f5f5f5; padding: 15px; border-radius: 5px; white-space: pre-wrap; font-family: monospace;">
{escalation_request.conversation_history or "No conversation history available."}
</pre>
""".strip()

# Build email payload
email_payload = {
"bcc": [],
"bodyType": "html",
"body": email_body,
"cc": [],
"delayTS": 0,
"encoding": "utf-8",
"from": settings.ches_sender_email,
"priority": "normal",
"subject": f"[{ticket_id}] LCFS Assistant Support Request - {issue_type_display}",
"to": [settings.ches_support_email],
"tag": "lcfs-assistant-escalation",
}

try:
success = await email_service.send_email(email_payload)
if success:
logger.info(
"Escalation email sent successfully",
ticket_id=ticket_id,
user_email=escalation_request.user_email,
issue_type=escalation_request.issue_type,
)
return EscalationResponse(
status="success",
message="Your request has been submitted successfully.",
ticket_id=ticket_id,
)
else:
logger.warning(
"Escalation email sending returned False",
ticket_id=ticket_id,
user_email=escalation_request.user_email,
)
# Still return success to user since we don't want to block them
return EscalationResponse(
status="success",
message="Your request has been submitted. Our team will review it shortly.",
ticket_id=ticket_id,
)
except Exception as e:
logger.error(
"Failed to send escalation email",
error=str(e),
ticket_id=ticket_id,
user_email=escalation_request.user_email,
)
raise HTTPException(
status_code=500,
detail="Failed to submit your request. Please try again later.",
)
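For illustration, a client-side call against the completions endpoint. The router's mount prefix is not shown in this diff, so the `/api/chat` path, base URL, and bearer-token header are assumptions rather than the confirmed routing:

import httpx

async def ask_assistant(token: str) -> dict:
    # Hypothetical path and auth scheme; adjust to however this router
    # is actually mounted and secured in the application.
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/api/chat/completions",
            headers={"Authorization": f"Bearer {token}"},
            json={
                "model": "lcfs-rag",
                "messages": [{"role": "user", "content": "Summarize my obligations."}],
            },
        )
        resp.raise_for_status()
        return resp.json()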