diff --git a/.gitignore b/.gitignore index 1b7fc5032..7f80a4482 100644 --- a/.gitignore +++ b/.gitignore @@ -155,3 +155,4 @@ UVAI_Digital_Refinery_Blueprint.pdf *.db .vercel .env*.local +.next/ diff --git a/apps/web/src/app/api/video/route.ts b/apps/web/src/app/api/video/route.ts index 7bb2a2dec..42cf8ad62 100644 --- a/apps/web/src/app/api/video/route.ts +++ b/apps/web/src/app/api/video/route.ts @@ -1,4 +1,5 @@ import { NextResponse } from 'next/server'; +import { publishEvent, EventTypes } from '@/lib/cloudevents'; // Backend URL with validation - skip if not a valid URL const rawBackendUrl = process.env.BACKEND_URL || ''; @@ -23,14 +24,18 @@ function getBaseUrl(request: Request): string { * and /api/extract-events serverless functions directly. */ export async function POST(request: Request) { + let videoUrl: string | undefined; try { const body = await request.json(); const { url } = body; + videoUrl = url; if (!url) { return NextResponse.json({ error: 'Video URL is required' }, { status: 400 }); } + await publishEvent(EventTypes.VIDEO_RECEIVED, { url }, url); + // ── Strategy 1: Full backend pipeline (skip if no backend configured) ── if (BACKEND_AVAILABLE) { try { @@ -85,6 +90,8 @@ export async function POST(request: Request) { project_scaffold: transcriptAction.project_scaffold || null, }; + await publishEvent(EventTypes.PIPELINE_COMPLETED, { strategy: 'backend', success: result.success, agents: result.orchestration_meta?.agents_used || [] }, url); + return NextResponse.json({ id: `vid_${Date.now().toString(36)}`, status: result.success ? 
'complete' : 'failed', @@ -116,6 +123,7 @@ export async function POST(request: Request) { let transcript = ''; let transcriptSource = 'none'; try { + await publishEvent(EventTypes.TRANSCRIPT_STARTED, { url, strategy: 'frontend' }, url); const baseUrl = getBaseUrl(request); const transcribeRes = await fetch(`${baseUrl}/api/transcribe`, { method: 'POST', @@ -126,6 +134,7 @@ export async function POST(request: Request) { if (transcribeResult.success && transcribeResult.transcript) { transcript = transcribeResult.transcript; transcriptSource = transcribeResult.source || 'frontend'; + await publishEvent(EventTypes.TRANSCRIPT_COMPLETED, { source: transcriptSource, wordCount: transcript.split(/\s+/).length }, url); } } catch (e) { console.error('Transcript extraction failed:', e); @@ -135,6 +144,7 @@ export async function POST(request: Request) { let extraction: { events?: Array<{ type: string; title: string; description?: string; timestamp?: string; priority?: string }>; actions?: Array<{ title: string }>; summary?: string; topics?: string[] } = {}; if (transcript) { try { + await publishEvent(EventTypes.EXTRACTION_STARTED, { transcriptLength: transcript.length }, url); const baseUrl = getBaseUrl(request); const extractRes = await fetch(`${baseUrl}/api/extract-events`, { method: 'POST', @@ -144,6 +154,7 @@ export async function POST(request: Request) { const extractResult = await extractRes.json(); if (extractResult.success && extractResult.data) { extraction = extractResult.data; + await publishEvent(EventTypes.EXTRACTION_COMPLETED, { events: extraction.events?.length || 0, actions: extraction.actions?.length || 0 }, url); } } catch (e) { console.error('Event extraction failed:', e); @@ -152,6 +163,12 @@ export async function POST(request: Request) { const hasResults = transcript.length > 0; + await publishEvent( + hasResults ? 
EventTypes.PIPELINE_COMPLETED : EventTypes.PIPELINE_FAILED, + { strategy: 'frontend', success: hasResults, transcriptSource }, + url, + ); + return NextResponse.json({ id: `vid_${Date.now().toString(36)}`, status: hasResults ? 'complete' : 'failed', @@ -176,6 +193,7 @@ export async function POST(request: Request) { }); } catch (error) { console.error('Video analysis error:', error); + await publishEvent(EventTypes.PIPELINE_FAILED, { error: String(error) }, videoUrl).catch(() => {}); return NextResponse.json( { error: 'Failed to analyze video', details: String(error) }, { status: 500 }, diff --git a/apps/web/src/hooks/use-builtin-ai.ts b/apps/web/src/hooks/use-builtin-ai.ts new file mode 100644 index 000000000..f198dc4a6 --- /dev/null +++ b/apps/web/src/hooks/use-builtin-ai.ts @@ -0,0 +1,37 @@ +'use client'; + +import { useEffect, useState } from 'react'; +import { + type BuiltInAICapabilities, + checkCapabilities, + summarizeTranscript, + extractEventsLocal, +} from '@/lib/services/builtin-ai'; + +/** + * React hook exposing Chrome Built-in AI capabilities. + * + * Usage: + * ```tsx + * const { available, summarize, extractEvents } = useBuiltInAI(); + * if (available.promptAPI) { + * const summary = await summarize(transcript); + * } + * ``` + */ +export function useBuiltInAI() { + const [available, setAvailable] = useState({ + promptAPI: false, + summarizerAPI: false, + }); + + useEffect(() => { + checkCapabilities().then(setAvailable); + }, []); + + return { + available, + summarize: summarizeTranscript, + extractEvents: extractEventsLocal, + }; +} diff --git a/apps/web/src/lib/cloudevents.ts b/apps/web/src/lib/cloudevents.ts new file mode 100644 index 000000000..98bcbbe18 --- /dev/null +++ b/apps/web/src/lib/cloudevents.ts @@ -0,0 +1,81 @@ +/** + * CloudEvents v1.0 publisher for the Next.js frontend pipeline. + * + * Emits standardized events at each video processing stage so that + * downstream consumers (Pub/Sub, webhooks, file sink) can react. 
+ * + * When no backend is configured the events are written to a local + * JSONL file (`/tmp/cloudevents.jsonl`) for observability. + */ + +export interface CloudEvent { + id: string; + source: string; + specversion: '1.0'; + type: string; + time: string; + subject?: string; + datacontenttype: string; + data: Record; +} + +function makeEvent( + type: string, + data: Record, + subject?: string, +): CloudEvent { + return { + id: crypto.randomUUID(), + source: '/eventrelay/api/video', + specversion: '1.0', + type, + time: new Date().toISOString(), + subject, + datacontenttype: 'application/json', + data, + }; +} + +// Event types following CloudEvents naming convention +export const EventTypes = { + VIDEO_RECEIVED: 'com.eventrelay.video.received', + TRANSCRIPT_STARTED: 'com.eventrelay.transcript.started', + TRANSCRIPT_COMPLETED: 'com.eventrelay.transcript.completed', + EXTRACTION_STARTED: 'com.eventrelay.extraction.started', + EXTRACTION_COMPLETED: 'com.eventrelay.extraction.completed', + PIPELINE_COMPLETED: 'com.eventrelay.pipeline.completed', + PIPELINE_FAILED: 'com.eventrelay.pipeline.failed', +} as const; + +/** + * Publish a CloudEvent. 
- If CLOUDEVENTS_WEBHOOK_URL is set → POST to that URL + * - In all cases → log the event to the console (dev/Vercel)
+ * + * Reference: Chrome Built-in AI Early Preview Program + * @see https://developer.chrome.com/docs/ai/built-in + */ + +/* eslint-disable @typescript-eslint/no-explicit-any */ + +// Type declarations for Chrome Built-in AI APIs (not yet in lib.dom.d.ts) +declare global { + interface Window { + ai?: { + languageModel?: { + capabilities(): Promise<{ available: 'no' | 'after-download' | 'readily' }>; + create(options?: { + systemPrompt?: string; + temperature?: number; + topK?: number; + }): Promise; + }; + summarizer?: { + capabilities(): Promise<{ available: 'no' | 'after-download' | 'readily' }>; + create(options?: { + type?: 'tl;dr' | 'key-points' | 'teaser' | 'headline'; + format?: 'plain-text' | 'markdown'; + length?: 'short' | 'medium' | 'long'; + }): Promise; + }; + }; + } +} + +interface LanguageModelSession { + prompt(input: string): Promise; + promptStreaming(input: string): ReadableStream; + destroy(): void; +} + +interface SummarizerSession { + summarize(input: string): Promise; + destroy(): void; +} + +export interface BuiltInAICapabilities { + promptAPI: boolean; + summarizerAPI: boolean; +} + +/** + * Check which Chrome Built-in AI APIs are available. + */ +export async function checkCapabilities(): Promise { + const result: BuiltInAICapabilities = { promptAPI: false, summarizerAPI: false }; + + if (typeof window === 'undefined' || !window.ai) return result; + + try { + const lm = await window.ai.languageModel?.capabilities(); + result.promptAPI = lm?.available === 'readily'; + } catch { /* not available */ } + + try { + const sm = await window.ai.summarizer?.capabilities(); + result.summarizerAPI = sm?.available === 'readily'; + } catch { /* not available */ } + + return result; +} + +/** + * Summarize transcript text using the on-device Summarizer API. + * Falls back to the Prompt API if Summarizer is unavailable. 
+ */ +export async function summarizeTranscript( + transcript: string, +): Promise { + if (typeof window === 'undefined' || !window.ai) return null; + + // Try Summarizer API first + if (window.ai.summarizer) { + try { + const caps = await window.ai.summarizer.capabilities(); + if (caps.available === 'readily') { + const session = await window.ai.summarizer.create({ + type: 'key-points', + format: 'markdown', + length: 'medium', + }); + try { + return await session.summarize(transcript); + } finally { + session.destroy(); + } + } + } catch (e) { + console.warn('[BuiltInAI] Summarizer failed:', e); + } + } + + // Fall back to Prompt API + return promptExtract( + transcript, + 'Summarize the following video transcript into key points in markdown format.', + ); +} + +/** + * Extract events/actions from transcript using the on-device Prompt API. + */ +export async function extractEventsLocal( + transcript: string, +): Promise { + return promptExtract( + transcript, + `Analyze this video transcript and extract: +1. Key events (what happened, when) +2. Action items (tasks, next steps) +3. Topics discussed +4. Overall sentiment + +Return as JSON with keys: events, actions, topics, sentiment.`, + ); +} + +/** + * Low-level Prompt API call with automatic session lifecycle. 
+ */ +async function promptExtract( + text: string, + systemPrompt: string, +): Promise { + if (typeof window === 'undefined' || !window.ai?.languageModel) return null; + + try { + const caps = await window.ai.languageModel.capabilities(); + if (caps.available !== 'readily') return null; + + const session = await window.ai.languageModel.create({ + systemPrompt, + temperature: 0.3, + topK: 3, + }); + + try { + return await session.prompt(text); + } finally { + session.destroy(); + } + } catch (e) { + console.warn('[BuiltInAI] Prompt API failed:', e); + return null; + } +} diff --git a/src/youtube_extension/backend/api/v1/router.py b/src/youtube_extension/backend/api/v1/router.py index 9d18a758c..234170a8c 100644 --- a/src/youtube_extension/backend/api/v1/router.py +++ b/src/youtube_extension/backend/api/v1/router.py @@ -21,6 +21,14 @@ TranscriptActionWorkflow, ) +# CloudEvents integration (optional — falls back to file sink) +try: + from youtube_extension.integration.cloudevents_publisher import create_publisher as _create_publisher + + _ce_publisher = _create_publisher(backend="file") +except Exception: + _ce_publisher = None + # Import services from ...containers.service_container import get_service from ...services.cache_service import CacheService @@ -77,6 +85,21 @@ logger = logging.getLogger(__name__) + +async def _emit_event(event_type: str, data: dict, subject: str | None = None) -> None: + """Emit a CloudEvent if the publisher is available.""" + if _ce_publisher is not None: + try: + await _ce_publisher.publish( + source="/eventrelay/backend/v1", + type=event_type, + data=data, + subject=subject, + ) + except Exception as exc: + logger.debug("CloudEvent publish failed: %s", exc) + + # Create API v1 router router = APIRouter( prefix="/api/v1", @@ -524,10 +547,12 @@ async def process_video_v1( """Basic video processing endpoint""" try: logger.info(f"Video processing request: {request.video_url}") + await _emit_event("com.eventrelay.video.received", {"url": 
request.video_url}, request.video_url) result = await video_processing_service.process_video_basic( request.video_url, request.options ) + await _emit_event("com.eventrelay.pipeline.completed", {"url": request.video_url, "strategy": "backend"}, request.video_url) # Persist summary for analytics/storage if repository is available try: from youtube_extension.backend.repositories.video_repository import ( @@ -550,6 +575,7 @@ async def process_video_v1( except Exception as e: logger.error(f"Error in video processing: {e}") + await _emit_event("com.eventrelay.pipeline.failed", {"url": request.video_url, "error": str(e)}, request.video_url) raise HTTPException(status_code=500, detail=str(e))