Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -155,3 +155,4 @@ UVAI_Digital_Refinery_Blueprint.pdf
*.db
.vercel
.env*.local
.next/
18 changes: 18 additions & 0 deletions apps/web/src/app/api/video/route.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { NextResponse } from 'next/server';
import { publishEvent, EventTypes } from '@/lib/cloudevents';

// Backend URL with validation - skip if not a valid URL
const rawBackendUrl = process.env.BACKEND_URL || '';
Expand All @@ -23,14 +24,18 @@ function getBaseUrl(request: Request): string {
* and /api/extract-events serverless functions directly.
*/
export async function POST(request: Request) {
let videoUrl: string | undefined;
try {
const body = await request.json();
const { url } = body;
videoUrl = url;

if (!url) {
return NextResponse.json({ error: 'Video URL is required' }, { status: 400 });
}

await publishEvent(EventTypes.VIDEO_RECEIVED, { url }, url);

// ── Strategy 1: Full backend pipeline (skip if no backend configured) ──
if (BACKEND_AVAILABLE) {
try {
Expand Down Expand Up @@ -85,6 +90,8 @@ export async function POST(request: Request) {
project_scaffold: transcriptAction.project_scaffold || null,
};

await publishEvent(EventTypes.PIPELINE_COMPLETED, { strategy: 'backend', success: result.success, agents: result.orchestration_meta?.agents_used || [] }, url);

return NextResponse.json({
id: `vid_${Date.now().toString(36)}`,
status: result.success ? 'complete' : 'failed',
Expand Down Expand Up @@ -116,6 +123,7 @@ export async function POST(request: Request) {
let transcript = '';
let transcriptSource = 'none';
try {
await publishEvent(EventTypes.TRANSCRIPT_STARTED, { url, strategy: 'frontend' }, url);
const baseUrl = getBaseUrl(request);
const transcribeRes = await fetch(`${baseUrl}/api/transcribe`, {
method: 'POST',
Expand All @@ -126,6 +134,7 @@ export async function POST(request: Request) {
if (transcribeResult.success && transcribeResult.transcript) {
transcript = transcribeResult.transcript;
transcriptSource = transcribeResult.source || 'frontend';
await publishEvent(EventTypes.TRANSCRIPT_COMPLETED, { source: transcriptSource, wordCount: transcript.split(/\s+/).length }, url);
}
} catch (e) {
console.error('Transcript extraction failed:', e);
Expand All @@ -135,6 +144,7 @@ export async function POST(request: Request) {
let extraction: { events?: Array<{ type: string; title: string; description?: string; timestamp?: string; priority?: string }>; actions?: Array<{ title: string }>; summary?: string; topics?: string[] } = {};
if (transcript) {
try {
await publishEvent(EventTypes.EXTRACTION_STARTED, { transcriptLength: transcript.length }, url);
const baseUrl = getBaseUrl(request);
const extractRes = await fetch(`${baseUrl}/api/extract-events`, {
method: 'POST',
Expand All @@ -144,6 +154,7 @@ export async function POST(request: Request) {
const extractResult = await extractRes.json();
if (extractResult.success && extractResult.data) {
extraction = extractResult.data;
await publishEvent(EventTypes.EXTRACTION_COMPLETED, { events: extraction.events?.length || 0, actions: extraction.actions?.length || 0 }, url);
}
} catch (e) {
console.error('Event extraction failed:', e);
Expand All @@ -152,6 +163,12 @@ export async function POST(request: Request) {

const hasResults = transcript.length > 0;

await publishEvent(
hasResults ? EventTypes.PIPELINE_COMPLETED : EventTypes.PIPELINE_FAILED,
{ strategy: 'frontend', success: hasResults, transcriptSource },
url,
);

return NextResponse.json({
id: `vid_${Date.now().toString(36)}`,
status: hasResults ? 'complete' : 'failed',
Expand All @@ -176,6 +193,7 @@ export async function POST(request: Request) {
});
} catch (error) {
console.error('Video analysis error:', error);
await publishEvent(EventTypes.PIPELINE_FAILED, { error: String(error) }, videoUrl).catch(() => {});
return NextResponse.json(
{ error: 'Failed to analyze video', details: String(error) },
{ status: 500 },
Expand Down
37 changes: 37 additions & 0 deletions apps/web/src/hooks/use-builtin-ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
'use client';

import { useEffect, useState } from 'react';
import {
type BuiltInAICapabilities,
checkCapabilities,
summarizeTranscript,
extractEventsLocal,
} from '@/lib/services/builtin-ai';

/**
 * React hook exposing Chrome Built-in AI capabilities.
 *
 * Probes the browser once on mount; `available` starts as all-false and
 * flips to the detected capabilities when the async probe resolves.
 *
 * Usage:
 * ```tsx
 * const { available, summarize, extractEvents } = useBuiltInAI();
 * if (available.promptAPI) {
 *   const summary = await summarize(transcript);
 * }
 * ```
 */
export function useBuiltInAI() {
  const [available, setAvailable] = useState<BuiltInAICapabilities>({
    promptAPI: false,
    summarizerAPI: false,
  });

  useEffect(() => {
    // Guard against setting state after unmount — checkCapabilities()
    // resolves asynchronously and the component may be gone by then.
    let cancelled = false;
    checkCapabilities().then((caps) => {
      if (!cancelled) setAvailable(caps);
    });
    return () => {
      cancelled = true;
    };
  }, []);

  return {
    available,
    // Stable module-level functions; safe to pass straight through.
    summarize: summarizeTranscript,
    extractEvents: extractEventsLocal,
  };
}
81 changes: 81 additions & 0 deletions apps/web/src/lib/cloudevents.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
/**
* CloudEvents v1.0 publisher for the Next.js frontend pipeline.
*
* Emits standardized events at each video processing stage so that
* downstream consumers (Pub/Sub, webhooks, file sink) can react.
*
 * When no webhook is configured, events are logged to the console for
 * observability (useful in serverless environments such as Vercel).
Comment on lines +7 to +8
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

This comment, and the one on line 54, states that events are written to /tmp/cloudevents.jsonl. However, the implementation on line 80 only logs events to the console. To avoid confusion for future developers, please update the comments to accurately describe the code's behavior.

Suggested change
* When no backend is configured the events are written to a local
* JSONL file (`/tmp/cloudevents.jsonl`) for observability.
* When no webhook is configured, events are logged to the console for
* observability, which is useful in serverless environments like Vercel.

*/
Comment on lines +1 to +9
Copy link

Copilot AI Feb 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The module docstring/comments claim events are appended to /tmp/cloudevents.jsonl when no backend/webhook is configured, but the implementation only console.logs and never writes a JSONL file. Either implement the file sink behavior (server-side only) or update the comments to match the actual behavior so operators aren’t misled.

Copilot uses AI. Check for mistakes.

/**
 * A CloudEvents v1.0 envelope (https://cloudevents.io/) as built by
 * makeEvent and serialized with JSON.stringify for delivery/logging.
 */
export interface CloudEvent {
  id: string; // unique per event (UUID from crypto.randomUUID)
  source: string; // producer identifier (route path)
  specversion: '1.0'; // pinned CloudEvents spec version
  type: string; // reverse-DNS event type (see EventTypes)
  time: string; // ISO-8601 timestamp of event creation
  subject?: string; // optional subject, e.g. the video URL
  datacontenttype: string; // MIME type describing `data`
  data: Record<string, unknown>; // event payload
}

/**
 * Wrap a payload in a CloudEvents v1.0 envelope.
 *
 * Property order is kept stable because the envelope is serialized with
 * JSON.stringify downstream.
 *
 * @param eventType  CloudEvents `type` attribute (reverse-DNS name).
 * @param payload    JSON-serializable event data.
 * @param subjectRef Optional `subject` attribute (e.g. the video URL).
 */
function makeEvent(
  eventType: string,
  payload: Record<string, unknown>,
  subjectRef?: string,
): CloudEvent {
  const eventId = crypto.randomUUID();
  const timestamp = new Date().toISOString();

  const envelope: CloudEvent = {
    id: eventId,
    source: '/eventrelay/api/video',
    specversion: '1.0',
    type: eventType,
    time: timestamp,
    subject: subjectRef,
    datacontenttype: 'application/json',
    data: payload,
  };
  return envelope;
}

// Event types following CloudEvents naming convention
export const EventTypes = {
  VIDEO_RECEIVED: 'com.eventrelay.video.received',
  TRANSCRIPT_STARTED: 'com.eventrelay.transcript.started',
  TRANSCRIPT_COMPLETED: 'com.eventrelay.transcript.completed',
  EXTRACTION_STARTED: 'com.eventrelay.extraction.started',
  EXTRACTION_COMPLETED: 'com.eventrelay.extraction.completed',
  PIPELINE_COMPLETED: 'com.eventrelay.pipeline.completed',
  PIPELINE_FAILED: 'com.eventrelay.pipeline.failed',
} as const;

// Union of all known event type strings, for consumers that want
// compile-time checking instead of a loose `string`.
export type EventType = (typeof EventTypes)[keyof typeof EventTypes];

/**
* Publish a CloudEvent.
*
* - If WEBHOOK_URL is set → POST to that URL
* - Otherwise → append to /tmp/cloudevents.jsonl (dev/Vercel)
*/
export async function publishEvent(
type: string,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The type parameter is currently a generic string. For better type safety and to prevent passing invalid event types, you can create a specific type from your EventTypes object and use that in the function signature.

First, add this type definition after your EventTypes constant (e.g., on line 49):

export type EventType = (typeof EventTypes)[keyof typeof EventTypes];

Then, you can use it here.

Suggested change
type: string,
type: EventType,

data: Record<string, unknown>,
subject?: string,
): Promise<void> {
const event = makeEvent(type, data, subject);

const webhookUrl = process.env.CLOUDEVENTS_WEBHOOK_URL;

if (webhookUrl) {
try {
await fetch(webhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/cloudevents+json',
},
body: JSON.stringify(event),
});
Comment on lines +65 to +73
Copy link

Copilot AI Feb 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

publishEvent performs an outbound fetch without any timeout/abort signal. In serverless/edge runtimes a slow or hanging webhook can unnecessarily delay the video pipeline (and you call publishEvent multiple times per request). Consider adding an AbortController timeout and/or making webhook publishing explicitly best-effort so it can’t hold the main request open.

Copilot uses AI. Check for mistakes.
Comment on lines +67 to +73
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

security-medium medium

The publishEvent function uses fetch to send events to a webhook URL without a timeout. Since this function is called multiple times and awaited in the main API route (/api/video), a slow or hanging webhook can block the API response, potentially leading to a Denial of Service (DoS) of the serverless function or worker process. Implementing a timeout for the fetch call is recommended to ensure system resilience.

} catch (e) {
console.warn('[CloudEvents] Webhook publish failed:', e);
}
}

// Always log the event for observability
console.log(`[CloudEvent] ${type}`, JSON.stringify({ id: event.id, subject }));
Comment on lines +67 to +80
Copy link

Copilot AI Feb 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fetch(webhookUrl, …) won’t throw on non-2xx responses, so webhook delivery failures (e.g., 400/500) will be silently treated as success. Consider checking response.ok / status and logging a warning (and possibly including the CloudEvent id/type) when the webhook responds with an error.

Suggested change
await fetch(webhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/cloudevents+json',
},
body: JSON.stringify(event),
});
} catch (e) {
console.warn('[CloudEvents] Webhook publish failed:', e);
}
}
// Always log the event for observability
console.log(`[CloudEvent] ${type}`, JSON.stringify({ id: event.id, subject }));
const response = await fetch(webhookUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/cloudevents+json',
},
body: JSON.stringify(event),
});
if (!response.ok) {
console.warn(
'[CloudEvents] Webhook responded with error status',
{
status: response.status,
statusText: response.statusText,
webhookUrl,
eventId: event.id,
eventType: event.type,
},
);
}
} catch (e) {
console.warn('[CloudEvents] Webhook publish failed:', e, {
webhookUrl,
eventId: event.id,
eventType: event.type,
});
}
}
// Always log the event for observability
console.log(
`[CloudEvent] ${type}`,
JSON.stringify({ id: event.id, subject }),
);

Copilot uses AI. Check for mistakes.
}
158 changes: 158 additions & 0 deletions apps/web/src/lib/services/builtin-ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
/**
* Chrome Built-in AI — client-side fallback for video analysis.
*
* Uses the Chrome Prompt API (`LanguageModel.create()`) and
* Summarizer API to perform on-device text processing when
* server-side API keys are unavailable or the user is offline.
*
* Reference: Chrome Built-in AI Early Preview Program
* @see https://developer.chrome.com/docs/ai/built-in
*/

/* eslint-disable @typescript-eslint/no-explicit-any */

Comment on lines +12 to +13
Copy link

Copilot AI Feb 28, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

/* eslint-disable @typescript-eslint/no-explicit-any */ is applied file-wide, but this file doesn’t currently use any. Removing the disable (or narrowing it to the specific line(s) that need it) will keep linting effective.

Suggested change
/* eslint-disable @typescript-eslint/no-explicit-any */

Copilot uses AI. Check for mistakes.
// Type declarations for Chrome Built-in AI APIs (not yet in lib.dom.d.ts)
declare global {
  interface Window {
    // Optional: only present in Chrome builds with Built-in AI enabled.
    ai?: {
      // Prompt API — general-purpose on-device language model.
      languageModel?: {
        // Reports whether the model is usable now ('readily'), needs a
        // download first ('after-download'), or is unavailable ('no').
        capabilities(): Promise<{ available: 'no' | 'after-download' | 'readily' }>;
        create(options?: {
          systemPrompt?: string;
          temperature?: number;
          topK?: number;
        }): Promise<LanguageModelSession>;
      };
      // Summarizer API — task-specific on-device summarization.
      summarizer?: {
        capabilities(): Promise<{ available: 'no' | 'after-download' | 'readily' }>;
        create(options?: {
          type?: 'tl;dr' | 'key-points' | 'teaser' | 'headline';
          format?: 'plain-text' | 'markdown';
          length?: 'short' | 'medium' | 'long';
        }): Promise<SummarizerSession>;
      };
    };
  }
}

// Handle to an active Prompt API session; destroy() releases model resources.
interface LanguageModelSession {
  prompt(input: string): Promise<string>;
  promptStreaming(input: string): ReadableStream<string>;
  destroy(): void;
}

// Handle to an active Summarizer session; destroy() releases model resources.
interface SummarizerSession {
  summarize(input: string): Promise<string>;
  destroy(): void;
}

// Feature-detection result for the two Built-in AI surfaces used here.
export interface BuiltInAICapabilities {
  promptAPI: boolean;
  summarizerAPI: boolean;
}

/**
 * Probe which Chrome Built-in AI APIs can be used right now.
 *
 * An API counts as available only when its capabilities() call succeeds
 * and reports 'readily' (a pending model download counts as unavailable).
 * Always resolves to all-false outside the browser (SSR) or when
 * `window.ai` is absent.
 */
export async function checkCapabilities(): Promise<BuiltInAICapabilities> {
  const caps: BuiltInAICapabilities = { promptAPI: false, summarizerAPI: false };

  const ai = typeof window !== 'undefined' ? window.ai : undefined;
  if (!ai) return caps;

  try {
    const promptCaps = await ai.languageModel?.capabilities();
    caps.promptAPI = promptCaps?.available === 'readily';
  } catch {
    /* Prompt API not available */
  }

  try {
    const summarizerCaps = await ai.summarizer?.capabilities();
    caps.summarizerAPI = summarizerCaps?.available === 'readily';
  } catch {
    /* Summarizer not available */
  }

  return caps;
}

/**
 * Summarize transcript text on-device.
 *
 * Prefers the Summarizer API (key-points, markdown, medium length); when
 * the Summarizer is missing, not yet downloaded, or throws, it falls back
 * to the Prompt API. Resolves to null outside the browser or when no
 * Built-in AI surface is usable.
 */
export async function summarizeTranscript(
  transcript: string,
): Promise<string | null> {
  if (typeof window === 'undefined' || !window.ai) return null;

  const summarizer = window.ai.summarizer;
  if (summarizer) {
    try {
      const caps = await summarizer.capabilities();
      if (caps.available === 'readily') {
        const session = await summarizer.create({
          type: 'key-points',
          format: 'markdown',
          length: 'medium',
        });
        try {
          return await session.summarize(transcript);
        } finally {
          // Always release the on-device session, even if summarize throws.
          session.destroy();
        }
      }
    } catch (err) {
      // Any Summarizer failure (capabilities, create, or summarize) drops
      // through to the Prompt API fallback below.
      console.warn('[BuiltInAI] Summarizer failed:', err);
    }
  }

  // Fall back to Prompt API
  return promptExtract(
    transcript,
    'Summarize the following video transcript into key points in markdown format.',
  );
}

/**
 * Extract events/actions from transcript using the on-device Prompt API.
 *
 * Asks the model for JSON with keys events/actions/topics/sentiment, but
 * returns the raw model text — callers must parse and validate it
 * themselves (the model is not guaranteed to comply). Resolves to null
 * when the Prompt API is unavailable (see promptExtract).
 */
export async function extractEventsLocal(
  transcript: string,
): Promise<string | null> {
  // The instruction lives in the system prompt; the transcript itself is
  // sent as the user turn by promptExtract.
  return promptExtract(
    transcript,
    `Analyze this video transcript and extract:
1. Key events (what happened, when)
2. Action items (tasks, next steps)
3. Topics discussed
4. Overall sentiment

Return as JSON with keys: events, actions, topics, sentiment.`,
  );
}

/**
* Low-level Prompt API call with automatic session lifecycle.
*/
async function promptExtract(
text: string,
systemPrompt: string,
): Promise<string | null> {
if (typeof window === 'undefined' || !window.ai?.languageModel) return null;

try {
const caps = await window.ai.languageModel.capabilities();
if (caps.available !== 'readily') return null;

const session = await window.ai.languageModel.create({
systemPrompt,
temperature: 0.3,
topK: 3,
});

try {
return await session.prompt(text);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

security-medium medium

The promptExtract function directly passes untrusted transcript text to the Chrome Built-in AI model without any sanitization or framing. An attacker can craft a video transcript that, when processed by the AI, causes it to generate malicious content or bypass intended constraints. This is a classic prompt injection vulnerability in an LLM-based feature. While this is a client-side AI feature, the manipulated output could still impact the user experience or lead to other vulnerabilities like XSS if the output is rendered unsafely in the UI.

} finally {
session.destroy();
}
} catch (e) {
console.warn('[BuiltInAI] Prompt API failed:', e);
return null;
}
}
Loading
Loading