diff --git a/examples/next-fastapi/README.md b/examples/next-fastapi/README.md
index e2578e2a78bd..779dc0cf7ed3 100644
--- a/examples/next-fastapi/README.md
+++ b/examples/next-fastapi/README.md
@@ -1,6 +1,6 @@
-# AI SDK, Next.js, and FastAPI Examples
+# AI SDK, Next.js, and FastAPI Example
 
-These examples show you how to use the [AI SDK](https://ai-sdk.dev/docs) with [Next.js](https://nextjs.org) and [FastAPI](https://fastapi.tiangolo.com).
+This example shows you how to use the [AI SDK](https://ai-sdk.dev/docs) with [Next.js](https://nextjs.org) and [FastAPI](https://fastapi.tiangolo.com).
 
 ## How to use
 
@@ -18,18 +18,19 @@
 yarn create next-app --example https://github.com/vercel/ai/tree/main/examples/next-fastapi next-fastapi-app
 pnpm create next-app --example https://github.com/vercel/ai/tree/main/examples/next-fastapi next-fastapi-app
 ```
 
-You will also need [Python 3.6+](https://www.python.org/downloads) and [virtualenv](https://virtualenv.pypa.io/en/latest/installation.html) installed to run the FastAPI server.
+You will also need [Python 3.6+](https://www.python.org/downloads) and a virtual environment (venv) to run the FastAPI server.
 
 To run the example locally you need to:
 
 1. Sign up at [OpenAI's Developer Platform](https://platform.openai.com/signup).
 2. Go to [OpenAI's dashboard](https://platform.openai.com/account/api-keys) and create an API KEY.
 3. Set the required environment variables as shown in [the example env file](./.env.local.example) but in a new file called `.env.local`.
-4. `virtualenv venv` to create a python virtual environment.
-5. `source venv/bin/activate` to activate the python virtual environment.
+4. `python -m venv venv` to create a python virtual environment.
+5. `source venv/bin/activate` (macOS/Linux) or `venv\Scripts\activate` (Windows) to activate the python virtual environment.
 6. `pip install -r requirements.txt` to install the required python dependencies.
+7. `uvicorn api.index:app --host 0.0.0.0 --port 8000 --reload` to run the FastAPI server.
-7. `pnpm install` to install the required dependencies.
-8. `pnpm dev` to launch the development server.
+8. `pnpm install` to install the required dependencies.
+9. `pnpm dev` to launch the Next.js development server on port 3000.
 
 ## Learn More
 
diff --git a/examples/next-fastapi/api/index.py b/examples/next-fastapi/api/index.py
index 3cdc9b190c7b..447a4d66c51f 100644
--- a/examples/next-fastapi/api/index.py
+++ b/examples/next-fastapi/api/index.py
@@ -1,26 +1,59 @@
-import os
 import json
-from typing import List
-from pydantic import BaseModel
+import os
+import uuid
+from typing import Dict, List, Optional
+
 from dotenv import load_dotenv
 from fastapi import FastAPI, Query
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
 from openai import OpenAI
-from .utils.prompt import ClientMessage, convert_to_openai_messages
-from .utils.tools import get_current_weather
+from openai._streaming import Stream
+from openai.types.responses import (
+    ResponseInputFileParam,
+    ResponseInputImageParam,
+    ResponseInputItemParam,
+    ResponseInputMessageContentListParam,
+    ResponseInputTextParam,
+    ResponseStreamEvent,
+    ToolParam,
+)
+from openai.types.responses.response_input_item_param import Message
+from pydantic import BaseModel
+from .schemas import MessageUI
+from .system_prompt import REASONING_SYSTEM_PROMPT, SYSTEM_PROMPT
+from .tools import get_current_weather
 
 load_dotenv(".env.local")
 
 app = FastAPI()
 
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # Allows all origins. In production, specify your frontend URL.
+    allow_credentials=True,
+    allow_methods=["*"],  # Allows all methods
+    allow_headers=["*"],  # Allows all headers
+)
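+# A minimal sketch of tightening CORS for production, assuming the frontend is
+# served from https://example.com (hypothetical URL, not part of this PR):
+#
+#   app.add_middleware(
+#       CORSMiddleware,
+#       allow_origins=["https://example.com"],
+#       allow_credentials=True,
+#       allow_methods=["POST"],
+#       allow_headers=["*"],
+#   )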
+
 client = OpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
 )
 
 
 class Request(BaseModel):
-    messages: List[ClientMessage]
+    messages: List[MessageUI]
+    model: str
+    search: bool = False
+    reasoning: bool = False
+
+
+# Tool call state tracking
+class ToolCallState(BaseModel):
+    id: str
+    name: str
+    arguments_buffer: str = ""
 
 
 available_tools = {
@@ -28,108 +61,400 @@ class Request(BaseModel):
 }
 
 
-def stream_text(messages: List[ClientMessage], protocol: str = 'data'):
-    stream = client.chat.completions.create(
-        messages=messages,
-        model="gpt-4o",
-        stream=True,
-        tools=[{
+def convert_to_openai_messages(messages: List[MessageUI], reasoning: bool) -> List[ResponseInputItemParam]:
+    processed_messages: list[ResponseInputItemParam] = []
+
+    system_prompt = REASONING_SYSTEM_PROMPT if reasoning else SYSTEM_PROMPT
+
+    # Add the system prompt at the beginning.
+    processed_messages.append(
+        Message(
+            role="system",
+            content=[
+                ResponseInputTextParam(text=system_prompt, type="input_text")
+            ],
+        )
+    )
+
+    for msg in messages:
+        content: ResponseInputMessageContentListParam = []
+
+        for part in msg.parts:
+            if part.type == "text":
+                content.append(
+                    ResponseInputTextParam(text=part.text, type="input_text")
+                )
+            elif part.type == "data-citation":
+                try:
+                    encoded = str(json.dumps(part.data))
+                    content.append(
+                        ResponseInputTextParam(text=encoded, type="input_text")
+                    )
+                except Exception as e:
+                    print(e)
+            elif part.type == "file" and part.mediaType == "image/png":
+                # Keep only alphanumeric characters, underscores, and hyphens;
+                # fall back to a random UUID when no filename is given.
+                sanitized_filename = 'file-' + (
+                    ''.join(c for c in part.filename if c.isalnum() or c in ['_', '-'])
+                    if part.filename
+                    else str(uuid.uuid4())
+                )
+                content.append(
+                    ResponseInputImageParam(
+                        detail="auto",
+                        image_url=part.url,
+                        file_id=sanitized_filename,
+                        type="input_image",
+                    )
+                )
+            elif part.type == "file":
+                content.append(
+                    ResponseInputFileParam(
+                        filename=str(part.filename),
+                        file_url=part.url,
+                        type="input_file",
+                    )
+                )
+            else:
+                pass
+
+        # The latest OpenAI API doesn't allow the role "assistant" here, so we
+        # send every conversation turn as "user".
+        processed_msg: Message = Message(
+            role="user",
+            content=content,
+        )
+
+        processed_messages.append(processed_msg)
+
+    return processed_messages
+
+
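+# A quick sketch of how the converter above is used (shapes taken from
+# ./schemas.py; illustrative only, not part of the runtime code):
+#
+#   from .schemas import MessageUI, TextUIPart
+#
+#   msgs = [MessageUI(role="user", parts=[TextUIPart(text="What's new in Python 3.10?")])]
+#   payload = convert_to_openai_messages(msgs, reasoning=False)
+#   # payload[0] is the system prompt; payload[1] is the converted user turn.
+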
+def stream_text(openai_messages: List[ResponseInputItemParam], protocol: str = 'data', model: str = 'gpt-5-nano', search: bool = False):
+
+    tools: List[ToolParam] = [
+        {
             "type": "function",
-            "function": {
-                "name": "get_current_weather",
-                "description": "Get the current weather in a given location",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "location": {
-                            "type": "string",
-                            "description": "The city and state, e.g. San Francisco, CA",
-                        },
-                        "unit": {
-                            "type": "string",
-                            "enum": ["celsius", "fahrenheit"]},
-                    },
-                    "required": ["location", "unit"],
+            "name": "get_current_weather",
+            "description": "Get current temperature for a given location.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "City and country, e.g. Bogotá, Colombia",
+                    }
                 },
+                "required": ["location"],
+                "additionalProperties": False,
             },
-        }]
+            "strict": True,
+        }
+    ]
+
+    if search:
+        tools.append({"type": "web_search"})
+
+    stream: Stream[ResponseStreamEvent] = client.responses.create(
+        model=model,
+        input=openai_messages,
+        stream=True,
+        tools=tools
+    )
+
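+    # When protocol is "data", the generator below emits AI SDK UI message
+    # stream frames as server-sent events. A typical sequence looks roughly
+    # like this (illustrative values):
+    #
+    #   data: {"type": "text-start", "id": "..."}
+    #   data: {"type": "text-delta", "id": "...", "delta": "Hel"}
+    #   data: {"type": "text-end", "id": "..."}
+    #   data: {"type": "finish"}
+    #   data: [DONE]
+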
     # When protocol is set to "text", you will send a stream of plain text chunks
-    # https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#text-stream-protocol
-    if (protocol == 'text'):
-        for chunk in stream:
-            for choice in chunk.choices:
-                if choice.finish_reason == "stop":
-                    break
-                else:
-                    yield "{text}".format(text=choice.delta.content)
-
-    # When protocol is set to "data", you will send a stream data part chunks
-    # https://ai-sdk.dev/docs/ai-sdk-ui/stream-protocol#data-stream-protocol
+    if protocol == 'text':
+        for event in stream:
+            text_chunk: str | None = getattr(event, "delta", None)
+            if text_chunk:
+                yield text_chunk
+
+    # When protocol is set to "data", you will send AI SDK protocol events
     elif (protocol == 'data'):
-        draft_tool_calls = []
-        draft_tool_calls_index = -1
-
-        for chunk in stream:
-            for choice in chunk.choices:
-                if choice.finish_reason == "stop":
-                    continue
-
-                elif choice.finish_reason == "tool_calls":
-                    for tool_call in draft_tool_calls:
-                        yield '9:{{"toolCallId":"{id}","toolName":"{name}","args":{args}}}\n'.format(
-                            id=tool_call["id"],
-                            name=tool_call["name"],
-                            args=tool_call["arguments"])
-
-                    for tool_call in draft_tool_calls:
-                        tool_result = available_tools[tool_call["name"]](
-                            **json.loads(tool_call["arguments"]))
-
-                        yield 'a:{{"toolCallId":"{id}","toolName":"{name}","args":{args},"result":{result}}}\n'.format(
-                            id=tool_call["id"],
-                            name=tool_call["name"],
-                            args=tool_call["arguments"],
-                            result=json.dumps(tool_result))
-
-                elif choice.delta.tool_calls:
-                    for tool_call in choice.delta.tool_calls:
-                        id = tool_call.id
-                        name = tool_call.function.name
-                        arguments = tool_call.function.arguments
-
-                        if (id is not None):
-                            draft_tool_calls_index += 1
-                            draft_tool_calls.append(
-                                {"id": id, "name": name, "arguments": ""})
+
+        # State machine
+        mode = "text"  # Can be "text", "reasoning", or "citation"
+
+        # Buffers
+        tag_search_buffer = ""
+        in_tag_detection = False
+        content_buffer = ""
-                        else:
-                            draft_tool_calls[draft_tool_calls_index]["arguments"] += arguments
+
+        # Tool call tracking - now type-safe
+        tool_calls: Dict[str, ToolCallState] = {}
+
+        # Current streaming IDs
+        current_text_id = None
+        current_reasoning_id = None
-                else:
-                    yield '0:{text}\n'.format(text=json.dumps(choice.delta.content))
+
+        def detect_tag(char: str):
+            """
+            Detect <think>, </think>, <custom_data_citation>, </custom_data_citation> tags.
+            Returns: (tag_found, tag_name) or (False, None)
+            """
+            nonlocal in_tag_detection, tag_search_buffer
+
+            if char == "<":
+                in_tag_detection = True
+                tag_search_buffer = "<"
+                return False, None
+
+            if not in_tag_detection:
+                return False, None
+
+            tag_search_buffer += char
+
+            # Check for <think>
+            if tag_search_buffer == "<think>":
+                in_tag_detection = False
+                tag_search_buffer = ""
+                return True, "think"
+
+            # Check for </think>
+            if tag_search_buffer == "</think>":
+                in_tag_detection = False
+                tag_search_buffer = ""
+                return True, "/think"
+
+            # Check for <custom_data_citation>
+            if tag_search_buffer == "<custom_data_citation>":
+                in_tag_detection = False
+                tag_search_buffer = ""
+                return True, "custom_data_citation"
+
+            # Check for </custom_data_citation>
+            if tag_search_buffer == "</custom_data_citation>":
+                in_tag_detection = False
+                tag_search_buffer = ""
+                return True, "/custom_data_citation"
+
+            # If buffer gets too long without matching, it's not one of our tags
+            if len(tag_search_buffer) > 30:
+                in_tag_detection = False
+                passthrough = tag_search_buffer
+                tag_search_buffer = ""
+                return True, ("passthrough", passthrough)
+
+            # Still accumulating potential tag
+            return False, None
-            if chunk.choices == []:
-                usage = chunk.usage
-                prompt_tokens = usage.prompt_tokens
-                completion_tokens = usage.completion_tokens
+
+        def process_char(char: str):
+            """Process a single character based on current mode."""
+            nonlocal mode, content_buffer, current_text_id, current_reasoning_id
+
+            # Detect tags
+            tag_found, tag_name = detect_tag(char)
+
+            if tag_found:
+                if isinstance(tag_name, tuple) and tag_name[0] == "passthrough":
+                    # Not our tag, output as regular text
+                    passthrough_text = tag_name[1]
+                    if mode == "text":
+                        if current_text_id is None:
+                            current_text_id = str(uuid.uuid4())
+                            yield ('text-start', current_text_id)
+                        yield ('text-delta', current_text_id, passthrough_text)
+                    elif mode == "reasoning":
+                        if current_reasoning_id:
+                            yield ('reasoning-delta', current_reasoning_id, passthrough_text)
+                    return
+
+                elif tag_name == "think":
+                    # End text mode, start reasoning
+                    if current_text_id:
+                        yield ('text-end', current_text_id)
+                        current_text_id = None
+
+                    mode = "reasoning"
+                    current_reasoning_id = str(uuid.uuid4())
+                    yield ('reasoning-start', current_reasoning_id)
+                    content_buffer = ""
+                    return
+
+                elif tag_name == "/think":
+                    # End reasoning mode, back to text
+                    if current_reasoning_id:
+                        yield ('reasoning-end', current_reasoning_id)
+                        current_reasoning_id = None
+                    mode = "text"
+                    content_buffer = ""
+                    return
+
+                elif tag_name == "custom_data_citation":
+                    # End current mode, start citation parsing
+                    if current_text_id:
+                        yield ('text-end', current_text_id)
+                        current_text_id = None
+                    if current_reasoning_id:
+                        yield ('reasoning-end', current_reasoning_id)
+                        current_reasoning_id = None
+
+                    mode = "citation"
+                    content_buffer = ""
+                    return
+
+                elif tag_name == "/custom_data_citation":
+                    # End citation, parse and emit
+                    try:
+                        citation_data = json.loads(content_buffer.strip())
+                        citation_id = str(uuid.uuid4())
+                        yield ('data-citation', citation_id, citation_data)
+                    except json.JSONDecodeError as e:
+                        print(f"Error parsing citation JSON: {e}")
+
+                    mode = "text"
+                    content_buffer = ""
+                    return
+
+                return
+
+            # If in tag detection, don't process character yet
+            if in_tag_detection:
+                return
+
+            # Process character based on mode
+            if mode == "text":
+                if current_text_id is None:
+                    current_text_id = str(uuid.uuid4())
+                    yield ('text-start', current_text_id)
+                yield ('text-delta', current_text_id, char)
+
+            elif mode == "reasoning":
+                if current_reasoning_id:
+                    yield ('reasoning-delta', current_reasoning_id, char)
+
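+            # In "citation" mode (handled below), the characters between
+            # <custom_data_citation> and </custom_data_citation> are buffered
+            # and parsed as JSON once the closing tag arrives (see detect_tag).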
elif mode == "citation": + content_buffer += char - yield 'd:{{"finishReason":"{reason}","usage":{{"promptTokens":{prompt},"completionTokens":{completion}}}}}\n'.format( - reason="tool-calls" if len( - draft_tool_calls) > 0 else "stop", - prompt=prompt_tokens, - completion=completion_tokens - ) + # Main streaming loop + for event in stream: + try: + # Handle function call arguments delta + if event.type == "response.function_call_arguments.delta": + item_id_delta: str = event.item_id + delta: str = event.delta + + # Initialize tool call state if new + if item_id_delta not in tool_calls: + # Store the tool call with empty name for now + tool_calls[item_id_delta] = ToolCallState( + id=item_id_delta, + name="get_current_weather", + arguments_buffer="" + ) + + # Emit tool-input-start with the correct tool name + yield f"data: {json.dumps({ + 'type': 'tool-input-start', + 'toolCallId': item_id_delta, + 'toolName': 'get_current_weather' + })}\n\n" + + # Accumulate arguments + tool_calls[item_id_delta].arguments_buffer += delta + + # Emit tool-input-delta + yield f"data: {json.dumps({ + 'type': 'tool-input-delta', + 'toolCallId': item_id_delta, + 'inputTextDelta': delta + })}\n\n" + + # Handle function call completed + elif event.type == 'response.function_call_arguments.done': + item_id: str = event.item_id + function_name: str = event.name + arguments_str: str = event.arguments + + # Update tool call with function name from the event + if item_id in tool_calls: + tool_calls[item_id].name = function_name + + try: + # Parse the tool arguments + tool_args: dict = json.loads(arguments_str) + + # Emit tool-input-available with correct tool name + yield f"data: {json.dumps({ + 'type': 'tool-input-available', + 'toolCallId': item_id, + 'toolName': function_name, + 'input': tool_args + })}\n\n" + + # Execute the tool if available + if function_name in available_tools: + tool_result = available_tools[function_name](**tool_args) + + # Emit tool-output-available + yield f"data: {json.dumps({ + 'type': 'tool-output-available', + 'toolCallId': item_id, + 'output': tool_result + })}\n\n" + else: + print(f"Warning: Tool '{function_name}' not found in available_tools") + + # Clean up tool call state + if item_id in tool_calls: + del tool_calls[item_id] + + except json.JSONDecodeError as e: + print(f"Error parsing tool arguments for {function_name}: {e}") + print(f"Arguments string: {arguments_str}") + + # Handle regular content delta + else: + content_chunk: str | None = getattr(event, "delta", None) + + if content_chunk: + # Process content character by character + for char in content_chunk: + for result in process_char(char): + if result[0] == 'text-start': + yield f"data: {json.dumps({'type': 'text-start', 'id': result[1]})}\n\n" + elif result[0] == 'text-delta': + yield f"data: {json.dumps({'type': 'text-delta', 'id': result[1], 'delta': result[2]})}\n\n" + elif result[0] == 'text-end': + yield f"data: {json.dumps({'type': 'text-end', 'id': result[1]})}\n\n" + elif result[0] == 'reasoning-start': + yield f"data: {json.dumps({'type': 'reasoning-start', 'id': result[1]})}\n\n" + elif result[0] == 'reasoning-delta': + yield f"data: {json.dumps({'type': 'reasoning-delta', 'id': result[1], 'delta': result[2]})}\n\n" + elif result[0] == 'reasoning-end': + yield f"data: {json.dumps({'type': 'reasoning-end', 'id': result[1]})}\n\n" + elif result[0] == 'data-citation': + yield f"data: {json.dumps({'type': 'data-citation', 'id': result[1], 'data': result[2]})}\n\n" + + except Exception as event_error: + 
print(f"Error processing event: {event_error}") + import traceback + traceback.print_exc() + continue + + # Cleanup: End any active blocks + if current_text_id: + yield f"data: {json.dumps({'type': 'text-end', 'id': current_text_id})}\n\n" + if current_reasoning_id: + yield f"data: {json.dumps({'type': 'reasoning-end', 'id': current_reasoning_id})}\n\n" + + # Emit finish events + yield f"data: {json.dumps({'type': 'finish'})}\n\n" + yield "data: [DONE]\n\n" @app.post("/api/chat") -async def handle_chat_data(request: Request, protocol: str = Query('data')): - messages = request.messages - openai_messages = convert_to_openai_messages(messages) +async def handle_chat_data(request: Request, protocol: str = Query('data')) -> StreamingResponse: + messages: List[MessageUI] = request.messages + openai_messages: List[ResponseInputItemParam] = convert_to_openai_messages(messages, request.reasoning) + + headers = { + "Cache-Control": "no-cache", + "x-vercel-ai-data-stream": "v1", + "Content-Type": "text/event-stream", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + } + + response: StreamingResponse = StreamingResponse( + content=stream_text(openai_messages, protocol, request.model, request.search), + headers=headers, + media_type="text/event-stream", + ) - response = StreamingResponse(stream_text(openai_messages, protocol)) - response.headers['x-vercel-ai-data-stream'] = 'v1' - return response + return response \ No newline at end of file diff --git a/examples/next-fastapi/api/schemas.py b/examples/next-fastapi/api/schemas.py new file mode 100644 index 000000000000..c5e3271e4a79 --- /dev/null +++ b/examples/next-fastapi/api/schemas.py @@ -0,0 +1,113 @@ +from datetime import datetime +from enum import Enum +from typing import Annotated, Any, Literal, Optional +from uuid import uuid4 + +from pydantic import BaseModel, Field + + +class MessageUIRole(str, Enum): + USER = "user" + ASSISTANT = "assistant" + SYSTEM = "system" + + +class BaseUIPart(BaseModel): + providerMetadata: Optional[dict[str, Any]] = None + + +class TextUIPart(BaseUIPart): + type: Literal["text"] = "text" + text: str + + +class ReasoningUIPart(BaseUIPart): + type: Literal["reasoning"] = "reasoning" + text: str + + +class SourceUrlUIPart(BaseUIPart): + type: Literal["source-url"] = "source-url" + sourceId: str + url: str + title: Optional[str] = None + + +class SourceDocumentUIPart(BaseUIPart): + type: Literal["source-document"] = "source-document" + sourceId: str + mediaType: str + title: str + filename: Optional[str] = None + + +class FileUIPart(BaseUIPart): + type: Literal["file"] = "file" + url: str + mediaType: str + filename: Optional[str] = None + + +# CUSTOM DATA (AI SDK `data-${NAME}`) + + +# Citation Custom Data +class DataCitationUIPartData(BaseModel): + title: str + url: str + description: str + number: int + +class DataCitationUIPart(BaseUIPart): + type: Literal["data-citation"] = "data-citation" + data: DataCitationUIPartData + + +class ToolUIWeatherInput(BaseModel): + location: str + unit: Optional[Literal["celsius", "fahrenheit"]] = None + +class ToolUIGetWeatherToolPart(BaseUIPart): + type: Literal["tool-get_current_weather"] = "tool-get_current_weather" + input: ToolUIWeatherInput + toolCallId: str + state: str + + +MessageUIPart = Annotated[ + TextUIPart + | ReasoningUIPart + | SourceUrlUIPart + | SourceDocumentUIPart + | FileUIPart + | DataCitationUIPart + | ToolUIGetWeatherToolPart, + Field(discriminator="type"), +] +MessageUIParts = list[MessageUIPart] + + + +class FeedbackMetadata(BaseModel): + liked: 
bool = False
+    disliked: bool = False
+    copied: bool = False
+
+
+class TimestampMetadata(BaseModel):
+    created_at: datetime = Field(default_factory=datetime.now)
+    updated_at: datetime = Field(default_factory=datetime.now)
+
+
+class MessageUIMetadata(BaseModel):
+    timestamp: TimestampMetadata = Field(default_factory=TimestampMetadata)
+    feedback: FeedbackMetadata = Field(default_factory=FeedbackMetadata)
+
+
+class MessageUI(BaseModel):
+    """**AI SDK V5 UIMessage** - 100% Compatible."""
+
+    id: str = Field(default_factory=lambda: str(uuid4()))
+    role: MessageUIRole
+    parts: MessageUIParts
+    metadata: Optional[MessageUIMetadata] = Field(default_factory=MessageUIMetadata)
diff --git a/examples/next-fastapi/api/system_prompt.py b/examples/next-fastapi/api/system_prompt.py
new file mode 100644
index 000000000000..55289fb82a5c
--- /dev/null
+++ b/examples/next-fastapi/api/system_prompt.py
@@ -0,0 +1,49 @@
+SYSTEM_PROMPT = """
+You are a helpful assistant with deep expertise in Software Engineering, Python, and React: best practices, design patterns, frameworks, libraries, tooling, performance optimization, testing, deployment, and modern development workflows.
+
+- When stating a fact, explaining a concept, or referencing documentation, immediately include a compact JSON citation tag in one line like this:
+<custom_data_citation>{"number":1,"title":"React Documentation: Hooks","url":"https://react.dev/reference/react","description":"Official React documentation for hooks and their usage"}</custom_data_citation>
+
+EXAMPLES:
+
+- **Python features**: Python 3.10 introduced structural pattern matching with the match statement, allowing more expressive conditional logic <custom_data_citation>{"number":1,"title":"Python 3.10 Release Notes","url":"https://docs.python.org/3/whatsnew/3.10.html","description":"Official Python documentation on pattern matching"}</custom_data_citation>
+
+- **React patterns**: The useCallback hook memoizes functions to prevent unnecessary re-renders of child components that depend on reference equality <custom_data_citation>{"number":2,"title":"React Hooks Reference: useCallback","url":"https://react.dev/reference/react/useCallback","description":"Official documentation explaining useCallback optimization"}</custom_data_citation>
+
+- **Architecture**: The Repository pattern separates data access logic from business logic, making code more testable and maintainable <custom_data_citation>{"number":3,"title":"Martin Fowler: Repository Pattern","url":"https://martinfowler.com/eaaCatalog/repository.html","description":"Explanation of the Repository pattern in enterprise architecture"}</custom_data_citation>
+
+- **Performance**: React's Virtual DOM reconciliation algorithm minimizes actual DOM operations by batching updates and computing minimal change sets <custom_data_citation>{"number":4,"title":"React Reconciliation","url":"https://react.dev/learn/preserving-and-resetting-state","description":"How React's reconciliation process works"}</custom_data_citation>
+
+Guidelines:
+- Prefer authoritative sources (official documentation, PEPs, RFC specs, framework docs, reputable engineering blogs like Martin Fowler, Kent C. Dodds) for technical claims.
+- Keep citations compact and attached to the claim they support.
+- When offering architectural advice or trade-offs, label opinions clearly and explain the context where recommendations apply.
+- For library or framework features, cite version-specific documentation when relevant.
+- Avoid outdated patterns; when discussing legacy approaches, note modern alternatives and cite sources for current best practices.
+"""
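+# Illustrative sketch (not part of the PR): the raw completion this prompt is
+# designed to elicit looks like
+#
+#   The match statement was added in Python 3.10 <custom_data_citation>{"number":1,
+#   "title":"Python 3.10 Release Notes","url":"https://docs.python.org/3/whatsnew/3.10.html",
+#   "description":"Official release notes"}</custom_data_citation>
+#
+# index.py's detect_tag/process_char buffer the JSON between the tags and
+# re-emit it to the client as an AI SDK `data-citation` part.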
+""" + +REASONING_SYSTEM_PROMPT = """ +You are a helpful assistant with deep expertise in Software Engineering, Python, and React: best practices, design patterns, frameworks, libraries, tooling, performance optimization, testing, deployment, and modern development workflows. + +At very start of the message you should simulate reasoning inside tags to outline your thought process before providing the final answer. + +- When stating a fact, explaining a concept, or referencing documentation, immediately include a compact JSON citation tag in one line like this: +{"number":1,"title":"React Documentation: Hooks","url":"https://react.dev/reference/react","description":"Official React documentation for hooks and their usage"} + +EXAMPLES: + +- **Python features**: Python 3.10 introduced structural pattern matching with the match statement, allowing more expressive conditional logic {"number":1,"title":"Python 3.10 Release Notes","url":"https://docs.python.org/3/whatsnew/3.10.html","description":"Official Python documentation on pattern matching"} + +- **React patterns**: The useCallback hook memoizes functions to prevent unnecessary re-renders of child components that depend on reference equality {"number":2,"title":"React Hooks Reference: useCallback","url":"https://react.dev/reference/react/useCallback","description":"Official documentation explaining useCallback optimization"} + +- **Architecture**: The Repository pattern separates data access logic from business logic, making code more testable and maintainable {"number":3,"title":"Martin Fowler: Repository Pattern","url":"https://martinfowler.com/eaaCatalog/repository.html","description":"Explanation of the Repository pattern in enterprise architecture"} + +- **Performance**: React's Virtual DOM reconciliation algorithm minimizes actual DOM operations by batching updates and computing minimal change sets {"number":4,"title":"React Reconciliation","url":"https://react.dev/learn/preserving-and-resetting-state","description":"How React's reconciliation process works"} + +Guidelines: +- Prefer authoritative sources (official documentation, PEPs, RFC specs, framework docs, reputable engineering blogs like Martin Fowler, Kent C. Dodds) for technical claims. +- Keep citations compact and attached to the claim they support. +- When offering architectural advice or trade-offs, label opinions clearly and explain the context where recommendations apply. +- For library or framework features, cite version-specific documentation when relevant. +- Avoid outdated patterns; when discussing legacy approaches, note modern alternatives and cite sources for current best practices. 
+""" \ No newline at end of file diff --git a/examples/next-fastapi/api/utils/tools.py b/examples/next-fastapi/api/tools.py similarity index 80% rename from examples/next-fastapi/api/utils/tools.py rename to examples/next-fastapi/api/tools.py index f6d1514447d0..9f812b282c67 100644 --- a/examples/next-fastapi/api/utils/tools.py +++ b/examples/next-fastapi/api/tools.py @@ -1,7 +1,9 @@ + import random def get_current_weather(location, unit="fahrenheit"): + print(f"TOOL CALL : Getting weather for {location} in {unit}") if unit == "celsius": temperature = random.randint(-34, 43) else: @@ -11,4 +13,4 @@ def get_current_weather(location, unit="fahrenheit"): "temperature": temperature, "unit": unit, "location": location, - } + } \ No newline at end of file diff --git a/examples/next-fastapi/api/utils/__init__.py b/examples/next-fastapi/api/utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/examples/next-fastapi/api/utils/prompt.py b/examples/next-fastapi/api/utils/prompt.py deleted file mode 100644 index 6674b85e0103..000000000000 --- a/examples/next-fastapi/api/utils/prompt.py +++ /dev/null @@ -1,75 +0,0 @@ -import json -from pydantic import BaseModel -from typing import List, Optional -from .types import ClientAttachment, ToolInvocation - - -class ClientMessage(BaseModel): - role: str - content: str - experimental_attachments: Optional[List[ClientAttachment]] = None - toolInvocations: Optional[List[ToolInvocation]] = None - - -def convert_to_openai_messages(messages: List[ClientMessage]): - openai_messages = [] - - for message in messages: - parts = [] - - parts.append({ - 'type': 'text', - 'text': message.content - }) - - if (message.experimental_attachments): - for attachment in message.experimental_attachments: - if (attachment.contentType.startswith('image')): - parts.append({ - 'type': 'image_url', - 'image_url': { - 'url': attachment.url - } - }) - - elif (attachment.contentType.startswith('text')): - parts.append({ - 'type': 'text', - 'text': attachment.url - }) - - if (message.toolInvocations): - tool_calls = [ - { - 'id': tool_invocation.toolCallId, - 'type': 'function', - 'function': { - 'name': tool_invocation.toolName, - 'arguments': json.dumps(tool_invocation.args) - } - } - for tool_invocation in message.toolInvocations] - - openai_messages.append({ - "role": 'assistant', - "tool_calls": tool_calls - }) - - tool_results = [ - { - 'role': 'tool', - 'content': json.dumps(tool_invocation.result), - 'tool_call_id': tool_invocation.toolCallId - } - for tool_invocation in message.toolInvocations] - - openai_messages.extend(tool_results) - - continue - - openai_messages.append({ - "role": message.role, - "content": parts - }) - - return openai_messages diff --git a/examples/next-fastapi/api/utils/types.py b/examples/next-fastapi/api/utils/types.py deleted file mode 100644 index 5c467d8dca04..000000000000 --- a/examples/next-fastapi/api/utils/types.py +++ /dev/null @@ -1,14 +0,0 @@ -from pydantic import BaseModel - - -class ClientAttachment(BaseModel): - name: str - contentType: str - url: str - - -class ToolInvocation(BaseModel): - toolCallId: str - toolName: str - args: dict - result: dict diff --git a/examples/next-fastapi/app/(examples)/01-chat-text/layout.tsx b/examples/next-fastapi/app/(examples)/01-chat-text/layout.tsx deleted file mode 100644 index cc7f42d8c1db..000000000000 --- a/examples/next-fastapi/app/(examples)/01-chat-text/layout.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { Metadata } from 'next'; - -export const metadata: Metadata = { - 
title: 'useChat', -}; - -export default function Layout({ children }: { children: React.ReactNode }) { - return <>{children}; -} diff --git a/examples/next-fastapi/app/(examples)/01-chat-text/page.tsx b/examples/next-fastapi/app/(examples)/01-chat-text/page.tsx deleted file mode 100644 index a5fe4497c45a..000000000000 --- a/examples/next-fastapi/app/(examples)/01-chat-text/page.tsx +++ /dev/null @@ -1,51 +0,0 @@ -'use client'; - -import { Card } from '@/app/components'; -import { useChat } from '@ai-sdk/react'; -import { TextStreamChatTransport } from 'ai'; -import { useState } from 'react'; - -export default function Page() { - const [input, setInput] = useState(''); - const { messages, sendMessage, status } = useChat({ - transport: new TextStreamChatTransport({ - api: '/api/chat?protocol=text', - }), - }); - - return ( -
-
- {messages.map(message => ( -
-
{`${message.role}: `}
-
- {message.parts - .map(part => (part.type === 'text' ? part.text : '')) - .join('')} -
-
- ))} -
- - {messages.length === 0 && } - -
{ - e.preventDefault(); - sendMessage({ text: input }); - setInput(''); - }} - className="fixed bottom-0 flex flex-col w-full border-t" - > - setInput(e.target.value)} - className="w-full p-4 bg-transparent outline-none" - disabled={status !== 'ready'} - /> -
-
- ); -} diff --git a/examples/next-fastapi/app/(examples)/02-chat-data/layout.tsx b/examples/next-fastapi/app/(examples)/02-chat-data/layout.tsx deleted file mode 100644 index cc7f42d8c1db..000000000000 --- a/examples/next-fastapi/app/(examples)/02-chat-data/layout.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { Metadata } from 'next'; - -export const metadata: Metadata = { - title: 'useChat', -}; - -export default function Layout({ children }: { children: React.ReactNode }) { - return <>{children}; -} diff --git a/examples/next-fastapi/app/(examples)/02-chat-data/page.tsx b/examples/next-fastapi/app/(examples)/02-chat-data/page.tsx deleted file mode 100644 index aa574d8db0d4..000000000000 --- a/examples/next-fastapi/app/(examples)/02-chat-data/page.tsx +++ /dev/null @@ -1,64 +0,0 @@ -'use client'; - -import { Card } from '@/app/components'; -import { useChat } from '@ai-sdk/react'; -import { getToolName, isToolUIPart } from 'ai'; -import { GeistMono } from 'geist/font/mono'; -import { useState } from 'react'; - -export default function Page() { - const [input, setInput] = useState(''); - const { messages, sendMessage, status } = useChat(); - - return ( -
-
- {messages.map(message => ( -
-
{`${message.role}: `}
- -
- {message.parts.map((part, index) => { - if (part.type === 'text') { - return
{part.text}
; - } else if (isToolUIPart(part)) { - return ( -
- {`${getToolName(part)}(${JSON.stringify( - part.input, - null, - 2, - )})`} -
- ); - } - })} -
-
- ))} -
- - {messages.length === 0 && } - -
{ - e.preventDefault(); - sendMessage({ text: input }); - setInput(''); - }} - className="fixed bottom-0 flex flex-col w-full border-t" - > - setInput(e.target.value)} - className="w-full p-4 bg-transparent outline-none" - disabled={status !== 'ready'} - /> -
-
- ); -} diff --git a/examples/next-fastapi/app/(examples)/03-chat-attachments/layout.tsx b/examples/next-fastapi/app/(examples)/03-chat-attachments/layout.tsx deleted file mode 100644 index 5841b2f21d2f..000000000000 --- a/examples/next-fastapi/app/(examples)/03-chat-attachments/layout.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { Metadata } from 'next'; - -export const metadata: Metadata = { - title: 'useChat with attachments', -}; - -export default function Layout({ children }: { children: React.ReactNode }) { - return <>{children}; -} diff --git a/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx b/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx deleted file mode 100644 index 173b4da7f4e0..000000000000 --- a/examples/next-fastapi/app/(examples)/03-chat-attachments/page.tsx +++ /dev/null @@ -1,108 +0,0 @@ -'use client'; - -import { Card } from '@/app/components'; -/* eslint-disable @next/next/no-img-element */ -import { useChat } from '@ai-sdk/react'; -import { useRef, useState } from 'react'; - -export default function Page() { - const [input, setInput] = useState(''); - const { messages, sendMessage, status } = useChat(); - - const [files, setFiles] = useState(undefined); - const fileInputRef = useRef(null); - - return ( -
-
- {messages.map(message => ( -
-
{`${message.role}: `}
-
- {message.parts.map((part, index) => { - if (part.type === 'text') { - return
{part.text}
; - } - if ( - part.type === 'file' && - part.mediaType?.startsWith('image/') - ) { - return ( -
- -
- ); - } - })} -
-
- ))} -
- - {messages.length === 0 && } - -
{ - sendMessage({ text: input, files }); - setInput(''); - setFiles(undefined); - - if (fileInputRef.current) { - fileInputRef.current.value = ''; - } - }} - className="fixed bottom-0 flex flex-col w-full gap-3 p-4 border-t h-28" - > -
- {files - ? Array.from(files).map(attachment => { - const { type } = attachment; - - if (type.startsWith('image/')) { - return ( -
- {attachment.name} - - {attachment.name} - -
- ); - } else if (type.startsWith('text/')) { - return ( -
-
- {attachment.name} -
- ); - } - }) - : ''} -
- { - if (event.target.files) { - setFiles(event.target.files); - } - }} - multiple - ref={fileInputRef} - /> - setInput(e.target.value)} - className="w-full bg-transparent outline-none" - disabled={status !== 'ready'} - /> - -
- ); -} diff --git a/examples/next-fastapi/app/components.tsx b/examples/next-fastapi/app/components.tsx deleted file mode 100644 index 8b66ba94ae70..000000000000 --- a/examples/next-fastapi/app/components.tsx +++ /dev/null @@ -1,99 +0,0 @@ -import { GeistMono } from 'geist/font/mono'; -import Link from 'next/link'; -import { ReactNode } from 'react'; - -const Code = ({ children }: { children: ReactNode }) => { - return ( - - {children} - - ); -}; - -export const Card = ({ type }: { type: string }) => { - return type === 'chat-text' ? ( -
-
-
- Stream Chat Completions -
-
-

- The useChat hook can be integrated with a Python - FastAPI backend to stream chat completions in real-time. The most - basic setup involves streaming plain text chunks by setting the{' '} - streamProtocol to text. -

- -

- To make your responses streamable, you will have to use the{' '} - StreamingResponse class provided by FastAPI. -

-
-
-
- ) : type === 'chat-data' ? ( -
-
-
- Stream Chat Completions with Tools -
-
-

- The useChat hook can be integrated with a Python - FastAPI backend to stream chat completions in real-time. However, - the most basic setup that involves streaming plain text chunks by - setting the streamProtocol to text is - limited. -

- -

- As a result, setting the streamProtocol to data allows - you to stream chunks that include information about tool calls and - results. -

- -

- To make your responses streamable, you will have to use the{' '} - StreamingResponse class provided by FastAPI. You will - also have to ensure that your chunks follow the{' '} - - data stream protocol - {' '} - and that the response has x-vercel-ai-data-stream{' '} - header set to v1. -

-
-
-
- ) : type === 'chat-attachments' ? ( -
-
-
- Stream Chat Completions with Attachments -
-
-

- The useChat hook can be integrated with a Python - FastAPI backend to stream chat completions in real-time. To make - your responses streamable, you will have to use the{' '} - StreamingResponse class provided by FastAPI. -

- -

- Furthermore, you can send files along with your messages by setting{' '} - experimental_attachments to true in{' '} - handleSubmit. This will allow you to use process these - attachments in your FastAPI backend. -

-
-
-
- ) : null; -}; diff --git a/examples/next-fastapi/app/globals.css b/examples/next-fastapi/app/globals.css index 426d8b2c4bd1..dc98be74c475 100644 --- a/examples/next-fastapi/app/globals.css +++ b/examples/next-fastapi/app/globals.css @@ -1,9 +1,122 @@ -@tailwind base; -@tailwind components; -@tailwind utilities; +@import "tailwindcss"; +@import "tw-animate-css"; + +@custom-variant dark (&:is(.dark *)); + +@theme inline { + --color-background: var(--background); + --color-foreground: var(--foreground); + --font-sans: var(--font-geist-sans); + --font-mono: var(--font-geist-mono); + --color-sidebar-ring: var(--sidebar-ring); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar: var(--sidebar); + --color-chart-5: var(--chart-5); + --color-chart-4: var(--chart-4); + --color-chart-3: var(--chart-3); + --color-chart-2: var(--chart-2); + --color-chart-1: var(--chart-1); + --color-ring: var(--ring); + --color-input: var(--input); + --color-border: var(--border); + --color-destructive: var(--destructive); + --color-accent-foreground: var(--accent-foreground); + --color-accent: var(--accent); + --color-muted-foreground: var(--muted-foreground); + --color-muted: var(--muted); + --color-secondary-foreground: var(--secondary-foreground); + --color-secondary: var(--secondary); + --color-primary-foreground: var(--primary-foreground); + --color-primary: var(--primary); + --color-popover-foreground: var(--popover-foreground); + --color-popover: var(--popover); + --color-card-foreground: var(--card-foreground); + --color-card: var(--card); + --radius-sm: calc(var(--radius) - 4px); + --radius-md: calc(var(--radius) - 2px); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) + 4px); +} :root { - --foreground-rgb: 0, 0, 0; - --background-start-rgb: 214, 219, 220; - --background-end-rgb: 255, 255, 255; + --radius: 0.625rem; + --background: oklch(1 0 0); + --foreground: oklch(0.145 0 0); + --card: oklch(1 0 0); + --card-foreground: oklch(0.145 0 0); + --popover: oklch(1 0 0); + --popover-foreground: oklch(0.145 0 0); + --primary: oklch(0.205 0 0); + --primary-foreground: oklch(0.985 0 0); + --secondary: oklch(0.97 0 0); + --secondary-foreground: oklch(0.205 0 0); + --muted: oklch(0.97 0 0); + --muted-foreground: oklch(0.556 0 0); + --accent: oklch(0.97 0 0); + --accent-foreground: oklch(0.205 0 0); + --destructive: oklch(0.577 0.245 27.325); + --border: oklch(0.922 0 0); + --input: oklch(0.922 0 0); + --ring: oklch(0.708 0 0); + --chart-1: oklch(0.646 0.222 41.116); + --chart-2: oklch(0.6 0.118 184.704); + --chart-3: oklch(0.398 0.07 227.392); + --chart-4: oklch(0.828 0.189 84.429); + --chart-5: oklch(0.769 0.188 70.08); + --sidebar: oklch(0.985 0 0); + --sidebar-foreground: oklch(0.145 0 0); + --sidebar-primary: oklch(0.205 0 0); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.97 0 0); + --sidebar-accent-foreground: oklch(0.205 0 0); + --sidebar-border: oklch(0.922 0 0); + --sidebar-ring: oklch(0.708 0 0); +} + +.dark { + --background: oklch(0.145 0 0); + --foreground: oklch(0.985 0 0); + --card: oklch(0.205 0 0); + --card-foreground: oklch(0.985 0 0); + --popover: oklch(0.205 0 0); + --popover-foreground: oklch(0.985 0 0); + --primary: oklch(0.922 0 0); + 
--primary-foreground: oklch(0.205 0 0); + --secondary: oklch(0.269 0 0); + --secondary-foreground: oklch(0.985 0 0); + --muted: oklch(0.269 0 0); + --muted-foreground: oklch(0.708 0 0); + --accent: oklch(0.269 0 0); + --accent-foreground: oklch(0.985 0 0); + --destructive: oklch(0.704 0.191 22.216); + --border: oklch(1 0 0 / 10%); + --input: oklch(1 0 0 / 15%); + --ring: oklch(0.556 0 0); + --chart-1: oklch(0.488 0.243 264.376); + --chart-2: oklch(0.696 0.17 162.48); + --chart-3: oklch(0.769 0.188 70.08); + --chart-4: oklch(0.627 0.265 303.9); + --chart-5: oklch(0.645 0.246 16.439); + --sidebar: oklch(0.205 0 0); + --sidebar-foreground: oklch(0.985 0 0); + --sidebar-primary: oklch(0.488 0.243 264.376); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.269 0 0); + --sidebar-accent-foreground: oklch(0.985 0 0); + --sidebar-border: oklch(1 0 0 / 10%); + --sidebar-ring: oklch(0.556 0 0); +} + +@layer base { + * { + @apply border-border outline-ring/50; + } + body { + @apply bg-background text-foreground; + } } diff --git a/examples/next-fastapi/app/icons.tsx b/examples/next-fastapi/app/icons.tsx deleted file mode 100644 index 7d04f2ac7520..000000000000 --- a/examples/next-fastapi/app/icons.tsx +++ /dev/null @@ -1,106 +0,0 @@ -export const LogoPython = () => ( - - - - - - - - - - - - - - -); - -export const LogoNext = () => ( - - - - - - - - - - - - - - - - - - - - - - -); diff --git a/examples/next-fastapi/app/layout.tsx b/examples/next-fastapi/app/layout.tsx index ff7408454dd9..74c2719e6b10 100644 --- a/examples/next-fastapi/app/layout.tsx +++ b/examples/next-fastapi/app/layout.tsx @@ -1,29 +1,32 @@ -import './globals.css'; -import { LogoNext, LogoPython } from './icons'; -import Link from 'next/link'; -import { GeistSans } from 'geist/font/sans'; +import type {Metadata} from "next"; +import {Geist, Geist_Mono} from "next/font/google"; +import "./globals.css"; -import { Metadata } from 'next'; +const geistSans = Geist({ + variable: "--font-geist-sans", + subsets: ["latin"], +}); + +const geistMono = Geist_Mono({ + variable: "--font-geist-mono", + subsets: ["latin"], +}); export const metadata: Metadata = { - title: 'AI SDK and FastAPI Examples', + title: "FastAPI + Next.js AI Chat", + description: "Generated by create next app", }; export default function RootLayout({ children, -}: { +}: Readonly<{ children: React.ReactNode; -}) { +}>) { return ( - - -
- -
+
- -
- + {children} diff --git a/examples/next-fastapi/app/page.tsx b/examples/next-fastapi/app/page.tsx index 5402cb20d250..c9339411b0b7 100644 --- a/examples/next-fastapi/app/page.tsx +++ b/examples/next-fastapi/app/page.tsx @@ -1,29 +1,282 @@ -import Link from 'next/link'; - -const examples = [ - { - title: 'useChat', - link: '/01-chat-text', - }, - { - title: 'useChat with tools', - link: '/02-chat-data', - }, - { - title: 'useChat with attachments', - link: '/03-chat-attachments', - }, -]; - -export default function Home() { +"use client"; +import {MessageUI, MessageUIPart} from "@/app/types"; +import { + Conversation, + ConversationContent, + ConversationScrollButton, +} from "@/components/ai-elements/conversation"; +import { + Message, + MessageAvatar, + MessageContent, +} from "@/components/ai-elements/message"; +import { + PromptInput, + PromptInputActionAddAttachments, + PromptInputActionMenu, + PromptInputActionMenuContent, + PromptInputActionMenuTrigger, + PromptInputAttachment, + PromptInputAttachments, + PromptInputBody, + PromptInputButton, + PromptInputFooter, + PromptInputHeader, + PromptInputMessage, + PromptInputModelSelect, + PromptInputModelSelectContent, + PromptInputModelSelectItem, + PromptInputModelSelectTrigger, + PromptInputModelSelectValue, + PromptInputSubmit, + PromptInputTextarea, + PromptInputTools, +} from "@/components/ai-elements/prompt-input"; +import {Reasoning} from "@/components/ai-elements/reasoning"; +import {Response} from "@/components/ai-elements/response"; +import { + Tool, + ToolContent, + ToolHeader, + ToolInput, +} from "@/components/ai-elements/tool"; +import {useChat} from "@ai-sdk/react"; +import {DefaultChatTransport} from "ai"; +import {BrainIcon, GlobeIcon} from "lucide-react"; +import {useState, useMemo, useRef, useEffect} from "react"; + +function Page() { + const [input, setInput] = useState(""); + const [useWebSearch, setUseWebSearch] = useState(false); + const [useReasoning, setUseReasoning] = useState(false); + const [model, setModel] = useState("gpt-5-nano"); + + const modelRef = useRef(model); + const searchRef = useRef(useWebSearch); + const reasoningRef = useRef(useReasoning); + + useEffect(() => { + modelRef.current = model; + }, [model]); + + useEffect(() => { + searchRef.current = useWebSearch; + }, [useWebSearch]); + + useEffect(() => { + reasoningRef.current = useReasoning; + }, [useReasoning]); + + const models = [ + {id: "gpt-5-nano", name: "GPT-5 Nano"}, + {id: "gpt-4o", name: "GPT-4o"}, + {id: "gpt-4", name: "GPT-4"}, + {id: "gpt-3.5-turbo", name: "GPT-3.5 Turbo"}, + ]; + + const transport = useMemo( + () => + // eslint-disable-next-line react-hooks/refs + new DefaultChatTransport({ + api: "http://localhost:8000/api/chat", + headers: { + "Content-Type": "application/json", + }, + prepareSendMessagesRequest: (requestOptions) => { + const {messages, trigger, messageId} = requestOptions; + return { + body: { + model: modelRef.current, + messages: messages, + search: searchRef.current, + reasoning: reasoningRef.current, + trigger, + messageId, + }, + }; + }, + }), + [] + ); + + const {messages, sendMessage, status} = useChat({ + transport, + }); + + async function handleSubmit(message: PromptInputMessage) { + const textToSend = message.text || input; + if (!textToSend.trim()) return; + + const parts: MessageUIPart[] = [{type: "text", text: textToSend}]; + + if (message.files?.length) { + message.files.forEach((attachment) => { + parts.push({ + type: "file", + filename: attachment.filename, + mediaType: attachment.mediaType, + 
url: attachment.url, + }); + }); + } + + try { + await sendMessage({ + role: "user", + parts, + }); + setInput(""); + } catch (error) { + console.error("Failed to send message:", error); + } + } + return ( -
- {examples.map((example, index) => ( - -
{index + 1}.
-
{example.title}
- - ))} -
+
+ + + {messages.map((message) => ( + + + + + + + ))} + + + +
+
+ + + + {(attachment) => } + + + + setInput(event.target.value)} + value={input} + /> + + + + + + + + + + setUseWebSearch(!useWebSearch)} + variant={useWebSearch ? "default" : "ghost"} + > + + Search + + setUseReasoning(!useReasoning)} + variant={useReasoning ? "default" : "ghost"} + > + + Reasoning + + + + + + + {models.map((model) => ( + + {model.name} + + ))} + + + + + + +
+
+
+ ); +} +export default Page; + +function MessageParts({message}: {message: MessageUI}) { + return ( + <> + {message.parts.map((part, index) => { + switch (part.type) { + case "reasoning": + return ( + + {part.text} + + ); + case "text": + return {part.text}; + case "file": + return ( + + ); + case "tool-get_current_weather": + return ( + + + + + + + ); + case "data-citation": + return ( +
+ Citation: {part.data.description} ( + + Link + + ) +
+ ); + + default: + return null; + } + })} + ); } diff --git a/examples/next-fastapi/app/types.ts b/examples/next-fastapi/app/types.ts new file mode 100644 index 000000000000..2d475805d0b3 --- /dev/null +++ b/examples/next-fastapi/app/types.ts @@ -0,0 +1,60 @@ +import type {UIMessage} from "ai"; + +export type MessageUIRole = "user" | "assistant" | "system"; + +// CUSTOM DATA (AI SDK `data-${NAME}`) + +export interface DataCitationUIPart { + title: string; + url: string; + description: string; + number: number; +} + +export type MessageUICustomData = { + citation: DataCitationUIPart; +}; + +// METADATA +export interface FeedbackMetadata { + liked: boolean; + disliked: boolean; + copied: boolean; +} + +export interface TimestampMetadata { + created_at: Date; + updated_at: Date; +} + +export interface MessageUIMetadata { + timestamp: TimestampMetadata; + feedback: FeedbackMetadata; +} + +// CUSTOM TOOLS (AI SDK `tool-${NAME}`) + +export interface GetCurrentWeatherInput { + location: string; +} + +export interface GetCurrentWeatherOutput { + location: string; + temperature: number; + conditions?: string; +} + +export type MessageUICustomTools = { + get_current_weather: { + input: GetCurrentWeatherInput; + output: GetCurrentWeatherOutput; + }; +}; + +export type MessageUIPart = MessageUI["parts"][number]; + +export type MessageUI = UIMessage< + MessageUIMetadata, + MessageUICustomData, + MessageUICustomTools +>; diff --git a/examples/next-fastapi/components.json b/examples/next-fastapi/components.json new file mode 100644 index 000000000000..b7b9791c70c6 --- /dev/null +++ b/examples/next-fastapi/components.json @@ -0,0 +1,22 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "", + "css": "app/globals.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "iconLibrary": "lucide", + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "registries": {} +} diff --git a/examples/next-fastapi/components/ai-elements/code-block.tsx b/examples/next-fastapi/components/ai-elements/code-block.tsx new file mode 100644 index 000000000000..272814b6ea31 --- /dev/null +++ b/examples/next-fastapi/components/ai-elements/code-block.tsx @@ -0,0 +1,179 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +"use client"; + +import {Button} from "@/components/ui/button"; +import {cn} from "@/lib/utils"; +import {CheckIcon, CopyIcon} from "lucide-react"; +import { + type ComponentProps, + createContext, + type HTMLAttributes, + useContext, + useEffect, + useRef, + useState, +} from "react"; +import {type BundledLanguage, codeToHtml, type ShikiTransformer} from "shiki"; + +type CodeBlockProps = HTMLAttributes & { + code: string; + language: BundledLanguage; + showLineNumbers?: boolean; +}; + +type CodeBlockContextType = { + code: string; +}; + +const CodeBlockContext = createContext({ + code: "", +}); + +const lineNumberTransformer: ShikiTransformer = { + name: "line-numbers", + line(node: any, line: number) { + node.children.unshift({ + type: "element", + tagName: "span", + properties: { + className: [ + "inline-block", + "min-w-10", + "mr-4", + "text-right", + "select-none", + "text-muted-foreground", + ], + }, + children: [{type: "text", value: String(line)}], + }); + }, +}; + +export async function highlightCode( + code: string, + language: BundledLanguage, + showLineNumbers = false +) { + const transformers: 
ShikiTransformer[] = showLineNumbers + ? [lineNumberTransformer] + : []; + + return await Promise.all([ + codeToHtml(code, { + lang: language, + theme: "one-light", + transformers, + }), + codeToHtml(code, { + lang: language, + theme: "one-dark-pro", + transformers, + }), + ]); +} + +export const CodeBlock = ({ + code, + language, + showLineNumbers = false, + className, + children, + ...props +}: CodeBlockProps) => { + const [html, setHtml] = useState(""); + const [darkHtml, setDarkHtml] = useState(""); + const mounted = useRef(false); + + useEffect(() => { + highlightCode(code, language, showLineNumbers).then(([light, dark]) => { + if (!mounted.current) { + setHtml(light); + setDarkHtml(dark); + mounted.current = true; + } + }); + + return () => { + mounted.current = false; + }; + }, [code, language, showLineNumbers]); + + return ( + +
+
+
+
+ {children && ( +
+ {children} +
+ )} +
+
+ + ); +}; + +export type CodeBlockCopyButtonProps = ComponentProps & { + onCopy?: () => void; + onError?: (error: Error) => void; + timeout?: number; +}; + +export const CodeBlockCopyButton = ({ + onCopy, + onError, + timeout = 2000, + children, + className, + ...props +}: CodeBlockCopyButtonProps) => { + const [isCopied, setIsCopied] = useState(false); + const {code} = useContext(CodeBlockContext); + + const copyToClipboard = async () => { + if (typeof window === "undefined" || !navigator?.clipboard?.writeText) { + onError?.(new Error("Clipboard API not available")); + return; + } + + try { + await navigator.clipboard.writeText(code); + setIsCopied(true); + onCopy?.(); + setTimeout(() => setIsCopied(false), timeout); + } catch (error) { + onError?.(error as Error); + } + }; + + const Icon = isCopied ? CheckIcon : CopyIcon; + + return ( + + ); +}; diff --git a/examples/next-fastapi/components/ai-elements/conversation.tsx b/examples/next-fastapi/components/ai-elements/conversation.tsx new file mode 100644 index 000000000000..a007df7da439 --- /dev/null +++ b/examples/next-fastapi/components/ai-elements/conversation.tsx @@ -0,0 +1,97 @@ +"use client"; + +import { Button } from "@/components/ui/button"; +import { cn } from "@/lib/utils"; +import { ArrowDownIcon } from "lucide-react"; +import type { ComponentProps } from "react"; +import { useCallback } from "react"; +import { StickToBottom, useStickToBottomContext } from "use-stick-to-bottom"; + +export type ConversationProps = ComponentProps; + +export const Conversation = ({ className, ...props }: ConversationProps) => ( + +); + +export type ConversationContentProps = ComponentProps< + typeof StickToBottom.Content +>; + +export const ConversationContent = ({ + className, + ...props +}: ConversationContentProps) => ( + +); + +export type ConversationEmptyStateProps = ComponentProps<"div"> & { + title?: string; + description?: string; + icon?: React.ReactNode; +}; + +export const ConversationEmptyState = ({ + className, + title = "No messages yet", + description = "Start a conversation to see messages here", + icon, + children, + ...props +}: ConversationEmptyStateProps) => ( +
+ {children ?? ( + <> + {icon &&
{icon}
} +
+

{title}

+ {description && ( +

{description}

+ )} +
+ + )} +
+); + +export type ConversationScrollButtonProps = ComponentProps; + +export const ConversationScrollButton = ({ + className, + ...props +}: ConversationScrollButtonProps) => { + const { isAtBottom, scrollToBottom } = useStickToBottomContext(); + + const handleScrollToBottom = useCallback(() => { + scrollToBottom(); + }, [scrollToBottom]); + + return ( + !isAtBottom && ( + + ) + ); +}; diff --git a/examples/next-fastapi/components/ai-elements/message.tsx b/examples/next-fastapi/components/ai-elements/message.tsx new file mode 100644 index 000000000000..e4c0ed82d538 --- /dev/null +++ b/examples/next-fastapi/components/ai-elements/message.tsx @@ -0,0 +1,80 @@ +import { + Avatar, + AvatarFallback, + AvatarImage, +} from "@/components/ui/avatar"; +import { cn } from "@/lib/utils"; +import type { UIMessage } from "ai"; +import { cva, type VariantProps } from "class-variance-authority"; +import type { ComponentProps, HTMLAttributes } from "react"; + +export type MessageProps = HTMLAttributes & { + from: UIMessage["role"]; +}; + +export const Message = ({ className, from, ...props }: MessageProps) => ( +
+); + +const messageContentVariants = cva( + "is-user:dark flex flex-col gap-2 overflow-hidden rounded-lg text-sm", + { + variants: { + variant: { + contained: [ + "max-w-[80%] px-4 py-3", + "group-[.is-user]:bg-primary group-[.is-user]:text-primary-foreground", + "group-[.is-assistant]:bg-secondary group-[.is-assistant]:text-foreground", + ], + flat: [ + "group-[.is-user]:max-w-[80%] group-[.is-user]:bg-secondary group-[.is-user]:px-4 group-[.is-user]:py-3 group-[.is-user]:text-foreground", + "group-[.is-assistant]:text-foreground", + ], + }, + }, + defaultVariants: { + variant: "contained", + }, + } +); + +export type MessageContentProps = HTMLAttributes & + VariantProps; + +export const MessageContent = ({ + children, + className, + variant, + ...props +}: MessageContentProps) => ( +
+ {children} +
+); + +export type MessageAvatarProps = ComponentProps & { + src: string; + name?: string; +}; + +export const MessageAvatar = ({ + src, + name, + className, + ...props +}: MessageAvatarProps) => ( + + + {name?.slice(0, 2) || "ME"} + +); diff --git a/examples/next-fastapi/components/ai-elements/prompt-input.tsx b/examples/next-fastapi/components/ai-elements/prompt-input.tsx new file mode 100644 index 000000000000..249095d89157 --- /dev/null +++ b/examples/next-fastapi/components/ai-elements/prompt-input.tsx @@ -0,0 +1,1353 @@ +/* eslint-disable react-hooks/set-state-in-effect */ +/* eslint-disable @typescript-eslint/no-explicit-any */ +"use client"; + +import {Button} from "@/components/ui/button"; +import { + Command, + CommandEmpty, + CommandGroup, + CommandInput, + CommandItem, + CommandList, + CommandSeparator, +} from "@/components/ui/command"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@/components/ui/dropdown-menu"; +import { + HoverCard, + HoverCardContent, + HoverCardTrigger, +} from "@/components/ui/hover-card"; +import { + InputGroup, + InputGroupAddon, + InputGroupButton, + InputGroupTextarea, +} from "@/components/ui/input-group"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; +import {cn} from "@/lib/utils"; +import type {ChatStatus, FileUIPart} from "ai"; +import { + ImageIcon, + Loader2Icon, + MicIcon, + PaperclipIcon, + PlusIcon, + SendIcon, + SquareIcon, + XIcon, +} from "lucide-react"; +import {nanoid} from "nanoid"; +import { + type ChangeEvent, + type ChangeEventHandler, + Children, + type ClipboardEventHandler, + type ComponentProps, + createContext, + type FormEvent, + type FormEventHandler, + Fragment, + type HTMLAttributes, + type KeyboardEventHandler, + type PropsWithChildren, + type ReactNode, + type RefObject, + useCallback, + useContext, + useEffect, + useMemo, + useRef, + useState, +} from "react"; +// ============================================================================ +// Provider Context & Types +// ============================================================================ + +export type AttachmentsContext = { + files: (FileUIPart & {id: string})[]; + add: (files: File[] | FileList) => void; + remove: (id: string) => void; + clear: () => void; + openFileDialog: () => void; + fileInputRef: RefObject; +}; + +export type TextInputContext = { + value: string; + setInput: (v: string) => void; + clear: () => void; +}; + +export type PromptInputControllerProps = { + textInput: TextInputContext; + attachments: AttachmentsContext; + /** INTERNAL: Allows PromptInput to register its file textInput + "open" callback */ + __registerFileInput: ( + ref: RefObject, + open: () => void + ) => void; +}; + +const PromptInputController = createContext( + null +); +const ProviderAttachmentsContext = createContext( + null +); + +export const usePromptInputController = () => { + const ctx = useContext(PromptInputController); + if (!ctx) { + throw new Error( + "Wrap your component inside to use usePromptInputController()." + ); + } + return ctx; +}; + +// Optional variants (do NOT throw). Useful for dual-mode components. +const useOptionalPromptInputController = () => + useContext(PromptInputController); + +export const useProviderAttachments = () => { + const ctx = useContext(ProviderAttachmentsContext); + if (!ctx) { + throw new Error( + "Wrap your component inside to use useProviderAttachments()." 
+    );
+  }
+  return ctx;
+};
+
+const useOptionalProviderAttachments = () =>
+  useContext(ProviderAttachmentsContext);
+
+export type PromptInputProviderProps = PropsWithChildren<{
+  initialInput?: string;
+}>;
+
+/**
+ * Optional global provider that lifts PromptInput state outside of PromptInput.
+ * If you don't use it, PromptInput stays fully self-managed.
+ */
+export function PromptInputProvider({
+  initialInput: initialTextInput = "",
+  children,
+}: PromptInputProviderProps) {
+  // ----- textInput state
+  const [textInput, setTextInput] = useState(initialTextInput);
+  const clearInput = useCallback(() => setTextInput(""), []);
+
+  // ----- attachments state (global when wrapped)
+  const [attachmentFiles, setAttachmentFiles] = useState<
+    (FileUIPart & {id: string})[]
+  >([]);
+  const fileInputRef = useRef<HTMLInputElement | null>(null);
+  const openRef = useRef<() => void>(() => {});
+
+  const add = useCallback((files: File[] | FileList) => {
+    const incoming = Array.from(files);
+    if (incoming.length === 0) return;
+
+    setAttachmentFiles((prev) =>
+      prev.concat(
+        incoming.map((file) => ({
+          id: nanoid(),
+          type: "file" as const,
+          url: URL.createObjectURL(file),
+          mediaType: file.type,
+          filename: file.name,
+        }))
+      )
+    );
+  }, []);
+
+  const remove = useCallback((id: string) => {
+    setAttachmentFiles((prev) => {
+      const found = prev.find((f) => f.id === id);
+      if (found?.url) URL.revokeObjectURL(found.url);
+      return prev.filter((f) => f.id !== id);
+    });
+  }, []);
+
+  const clear = useCallback(() => {
+    setAttachmentFiles((prev) => {
+      for (const f of prev) if (f.url) URL.revokeObjectURL(f.url);
+      return [];
+    });
+  }, []);
+
+  const openFileDialog = useCallback(() => {
+    openRef.current?.();
+  }, []);
+
+  const attachments = useMemo(
+    () => ({
+      files: attachmentFiles,
+      add,
+      remove,
+      clear,
+      openFileDialog,
+      fileInputRef,
+    }),
+    [attachmentFiles, add, remove, clear, openFileDialog]
+  );
+
+  const __registerFileInput = useCallback(
+    (ref: RefObject<HTMLInputElement | null>, open: () => void) => {
+      fileInputRef.current = ref.current;
+      openRef.current = open;
+    },
+    []
+  );
+
+  const controller = useMemo(
+    () => ({
+      textInput: {
+        value: textInput,
+        setInput: setTextInput,
+        clear: clearInput,
+      },
+      attachments,
+      __registerFileInput,
+    }),
+    [textInput, clearInput, attachments, __registerFileInput]
+  );
+
+  return (
+    <PromptInputController.Provider value={controller}>
+      <ProviderAttachmentsContext.Provider value={attachments}>
+        {children}
+      </ProviderAttachmentsContext.Provider>
+    </PromptInputController.Provider>
+  );
+}
+
+// ============================================================================
+// Component Context & Hooks
+// ============================================================================
+
+const LocalAttachmentsContext = createContext<AttachmentsContext | null>(null);
+
+export const usePromptInputAttachments = () => {
+  // Dual-mode: prefer provider if present, otherwise use local
+  const provider = useOptionalProviderAttachments();
+  const local = useContext(LocalAttachmentsContext);
+  const context = provider ?? local;
+  if (!context) {
+    throw new Error(
+      "usePromptInputAttachments must be used within a PromptInput or PromptInputProvider"
+    );
+  }
+  return context;
+};
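+/*
+  Usage sketch for the dual-mode API. Illustrative only: the Composer
+  component below is hypothetical and not part of this file.
+
+    function Composer() {
+      // With a <PromptInputProvider> above, this state is shared across the
+      // app; without one, each <PromptInput> manages its own state.
+      const {attachments} = usePromptInputController();
+      return (
+        <button onClick={attachments.openFileDialog} type="button">
+          Attach ({attachments.files.length})
+        </button>
+      );
+    }
+
+    <PromptInputProvider initialInput="Hello">
+      <Composer />
+      {/* ...PromptInput and friends... */}
+    </PromptInputProvider>
+*/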
"Image" : "Attachment"); + + return ( + + +
+
+
+ {isImage ? ( + {filename + ) : ( +
+ +
+ )} +
+ +
+ + {attachmentLabel} +
+
+ +
+ {isImage && ( +
+ {filename +
+ )} +
+
+

+ {filename || (isImage ? "Image" : "Attachment")} +

+ {data.mediaType && ( +

+ {data.mediaType} +

+ )} +
+
+
+
+
+  );
+}
+
+export type PromptInputAttachmentsProps = Omit<
+  HTMLAttributes<HTMLDivElement>,
+  "children"
+> & {
+  children: (attachment: FileUIPart & {id: string}) => ReactNode;
+};
+
+export function PromptInputAttachments({
+  children,
+}: PromptInputAttachmentsProps) {
+  const attachments = usePromptInputAttachments();
+
+  if (!attachments.files.length) {
+    return null;
+  }
+
+  return attachments.files.map((file) => (
+    <Fragment key={file.id}>{children(file)}</Fragment>
+  ));
+}
+
+export type PromptInputActionAddAttachmentsProps = ComponentProps<
+  typeof DropdownMenuItem
+> & {
+  label?: string;
+};
+
+export const PromptInputActionAddAttachments = ({
+  label = "Add photos or files",
+  ...props
+}: PromptInputActionAddAttachmentsProps) => {
+  const attachments = usePromptInputAttachments();
+
+  return (
+    <DropdownMenuItem
+      {...props}
+      onSelect={(e) => {
+        e.preventDefault();
+        attachments.openFileDialog();
+      }}
+    >
+      <ImageIcon className="mr-2 size-4" /> {label}
+    </DropdownMenuItem>
+  );
+};
+
+export type PromptInputMessage = {
+  text?: string;
+  files?: FileUIPart[];
+};
+
+export type PromptInputProps = Omit<
+  HTMLAttributes<HTMLFormElement>,
+  "onSubmit" | "onError"
+> & {
+  accept?: string; // e.g., "image/*" or leave undefined for any
+  multiple?: boolean;
+  // When true, accepts drops anywhere on the document. Default false (opt-in).
+  globalDrop?: boolean;
+  // Render a hidden input with given name and keep it in sync for native form posts. Default false.
+  syncHiddenInput?: boolean;
+  // Minimal constraints
+  maxFiles?: number;
+  maxFileSize?: number; // bytes
+  onError?: (err: {
+    code: "max_files" | "max_file_size" | "accept";
+    message: string;
+  }) => void;
+  onSubmit: (
+    message: PromptInputMessage,
+    event: FormEvent<HTMLFormElement>
+  ) => void | Promise<void>;
+};
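+/*
+  Usage sketch. Illustrative only: sendMessage is a hypothetical submit
+  handler, not part of this file.
+
+    <PromptInput
+      accept="image/*"
+      globalDrop
+      maxFiles={4}
+      maxFileSize={5 * 1024 * 1024} // 5 MB
+      multiple
+      onError={(err) => console.warn(err.code, err.message)}
+      onSubmit={async ({text, files}) => {
+        // files arrive as data: URLs, ready to send to the backend
+        await sendMessage({text, files});
+      }}
+    >
+      <PromptInputAttachments>
+        {(file) => <PromptInputAttachment data={file} />}
+      </PromptInputAttachments>
+      {/* textarea, toolbar, submit button, ... */}
+    </PromptInput>
+*/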
+export const PromptInput = ({
+  className,
+  accept,
+  multiple,
+  globalDrop,
+  syncHiddenInput,
+  maxFiles,
+  maxFileSize,
+  onError,
+  onSubmit,
+  children,
+  ...props
+}: PromptInputProps) => {
+  // Try to use a provider controller if present
+  const controller = useOptionalPromptInputController();
+  const usingProvider = !!controller;
+
+  // Refs
+  const inputRef = useRef<HTMLInputElement | null>(null);
+  const anchorRef = useRef<HTMLSpanElement | null>(null);
+  const formRef = useRef<HTMLFormElement | null>(null);
+
+  // Find nearest form to scope drag & drop
+  useEffect(() => {
+    const root = anchorRef.current?.closest("form");
+    if (root instanceof HTMLFormElement) {
+      formRef.current = root;
+    }
+  }, []);
+
+  // ----- Local attachments (only used when no provider)
+  const [items, setItems] = useState<(FileUIPart & {id: string})[]>([]);
+  const files = usingProvider ? controller.attachments.files : items;
+
+  const openFileDialogLocal = useCallback(() => {
+    inputRef.current?.click();
+  }, []);
+
+  const matchesAccept = useCallback(
+    (f: File) => {
+      if (!accept || accept.trim() === "") {
+        return true;
+      }
+      if (accept.includes("image/*")) {
+        return f.type.startsWith("image/");
+      }
+      // NOTE: keep simple; expand as needed
+      return true;
+    },
+    [accept]
+  );
+
+  const addLocal = useCallback(
+    (fileList: File[] | FileList) => {
+      const incoming = Array.from(fileList);
+      const accepted = incoming.filter((f) => matchesAccept(f));
+      if (incoming.length && accepted.length === 0) {
+        onError?.({
+          code: "accept",
+          message: "No files match the accepted types.",
+        });
+        return;
+      }
+      const withinSize = (f: File) =>
+        maxFileSize ? f.size <= maxFileSize : true;
+      const sized = accepted.filter(withinSize);
+      if (accepted.length > 0 && sized.length === 0) {
+        onError?.({
+          code: "max_file_size",
+          message: "All files exceed the maximum size.",
+        });
+        return;
+      }
+
+      setItems((prev) => {
+        const capacity =
+          typeof maxFiles === "number"
+            ? Math.max(0, maxFiles - prev.length)
+            : undefined;
+        const capped =
+          typeof capacity === "number" ? sized.slice(0, capacity) : sized;
+        if (typeof capacity === "number" && sized.length > capacity) {
+          onError?.({
+            code: "max_files",
+            message: "Too many files. Some were not added.",
+          });
+        }
+        const next: (FileUIPart & {id: string})[] = [];
+        for (const file of capped) {
+          next.push({
+            id: nanoid(),
+            type: "file",
+            url: URL.createObjectURL(file),
+            mediaType: file.type,
+            filename: file.name,
+          });
+        }
+        return prev.concat(next);
+      });
+    },
+    [matchesAccept, maxFiles, maxFileSize, onError]
+  );
+
+  const add = usingProvider
+    ? (files: File[] | FileList) => controller.attachments.add(files)
+    : addLocal;
+
+  const remove = usingProvider
+    ? (id: string) => controller.attachments.remove(id)
+    : (id: string) =>
+        setItems((prev) => {
+          const found = prev.find((file) => file.id === id);
+          if (found?.url) {
+            URL.revokeObjectURL(found.url);
+          }
+          return prev.filter((file) => file.id !== id);
+        });
+
+  const clear = usingProvider
+    ? () => controller.attachments.clear()
+    : () =>
+        setItems((prev) => {
+          for (const file of prev) {
+            if (file.url) {
+              URL.revokeObjectURL(file.url);
+            }
+          }
+          return [];
+        });
+
+  const openFileDialog = usingProvider
+    ? () => controller.attachments.openFileDialog()
+    : openFileDialogLocal;
+
+  // Let provider know about our hidden file input so external menus can call openFileDialog()
+  useEffect(() => {
+    if (!usingProvider) return;
+    controller.__registerFileInput(inputRef, () => inputRef.current?.click());
+  }, [usingProvider, controller]);
+
+  // Note: a file input's value cannot be programmatically set for security
+  // reasons, so syncHiddenInput can only clear the input once all
+  // attachments are removed.
+  useEffect(() => {
+    if (syncHiddenInput && inputRef.current && files.length === 0) {
+      inputRef.current.value = "";
+    }
+  }, [files, syncHiddenInput]);
+
+  // Attach drop handlers on the nearest form
+  useEffect(() => {
+    const form = formRef.current;
+    if (!form) return;
+
+    const onDragOver = (e: DragEvent) => {
+      if (e.dataTransfer?.types?.includes("Files")) {
+        e.preventDefault();
+      }
+    };
+    const onDrop = (e: DragEvent) => {
+      if (e.dataTransfer?.types?.includes("Files")) {
+        e.preventDefault();
+      }
+      if (e.dataTransfer?.files && e.dataTransfer.files.length > 0) {
+        add(e.dataTransfer.files);
+      }
+    };
+    form.addEventListener("dragover", onDragOver);
+    form.addEventListener("drop", onDrop);
+    return () => {
+      form.removeEventListener("dragover", onDragOver);
+      form.removeEventListener("drop", onDrop);
+    };
+  }, [add]);
+
+  // Attach drop handlers on the whole document (opt-in via globalDrop)
+  useEffect(() => {
+    if (!globalDrop) return;
+
+    const onDragOver = (e: DragEvent) => {
+      if (e.dataTransfer?.types?.includes("Files")) {
+        e.preventDefault();
+      }
+    };
+    const onDrop = (e: DragEvent) => {
+      if (e.dataTransfer?.types?.includes("Files")) {
+        e.preventDefault();
+      }
+      if (e.dataTransfer?.files && e.dataTransfer.files.length > 0) {
+        add(e.dataTransfer.files);
+      }
+    };
+    document.addEventListener("dragover", onDragOver);
+    document.addEventListener("drop", onDrop);
+    return () => {
+      document.removeEventListener("dragover", onDragOver);
+      document.removeEventListener("drop", onDrop);
+    };
+  }, [add, globalDrop]);
+
+  // Revoke locally created object URLs on unmount
+  useEffect(
+    () => () => {
+      if (!usingProvider) {
+        for (const f of files) {
+          if (f.url) URL.revokeObjectURL(f.url);
+        }
+      }
+    },
+    [usingProvider, files]
+  );
+
+  const handleChange: ChangeEventHandler<HTMLInputElement> = (event) => {
+    if (event.currentTarget.files) {
+      add(event.currentTarget.files);
+    }
+  };
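+  // Blob object URLs (blob:...) only resolve inside the current browser
+  // session, so they cannot be posted to the backend as-is. Before submit,
+  // each blob URL is re-read and inlined as a base64 data: URL, e.g.
+  //   blob:http://localhost:3000/9f3c...  ->  data:image/png;base64,iVBORw0...
+  // (values abbreviated for illustration).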
+  const convertBlobUrlToDataUrl = async (url: string): Promise<string> => {
+    const response = await fetch(url);
+    const blob = await response.blob();
+    return new Promise((resolve, reject) => {
+      const reader = new FileReader();
+      reader.onloadend = () => resolve(reader.result as string);
+      reader.onerror = reject;
+      reader.readAsDataURL(blob);
+    });
+  };
+
+  const ctx = useMemo(
+    () => ({
+      files,
+      add,
+      remove,
+      clear,
+      openFileDialog,
+      fileInputRef: inputRef,
+    }),
+    [files, add, remove, clear, openFileDialog]
+  );
+
+  const handleSubmit: FormEventHandler<HTMLFormElement> = (event) => {
+    event.preventDefault();
+
+    const form = event.currentTarget;
+    const text = usingProvider
+      ? controller.textInput.value
+      : (() => {
+          const formData = new FormData(form);
+          return (formData.get("message") as string) || "";
+        })();
+
+    // Reset the form immediately after capturing text to avoid a race
+    // condition where user input typed during the async blob conversion
+    // would be lost
+    if (!usingProvider) {
+      form.reset();
+    }
+
+    // Convert blob URLs to data URLs asynchronously
+    Promise.all(
+      files.map(async ({id, ...item}) => {
+        if (item.url && item.url.startsWith("blob:")) {
+          return {
+            ...item,
+            url: await convertBlobUrlToDataUrl(item.url),
+          };
+        }
+        return item;
+      })
+    ).then((convertedFiles: FileUIPart[]) => {
+      try {
+        const result = onSubmit({text, files: convertedFiles}, event);
+
+        // Handle both sync and async onSubmit
+        if (result instanceof Promise) {
+          result
+            .then(() => {
+              clear();
+              if (usingProvider) {
+                controller.textInput.clear();
+              }
+            })
+            .catch(() => {
+              // Don't clear on error - the user may want to retry
+            });
+        } else {
+          // Sync onSubmit completed without throwing: clear attachments
+          clear();
+          if (usingProvider) {
+            controller.textInput.clear();
+          }
+        }
+      } catch {
+        // Don't clear on error - the user may want to retry
+      }
+    });
+  };
+
+  // Render with or without local provider
+  const inner = (
+    <>
+      {/* Hidden anchor used to locate the nearest form for drag & drop */}
+      <span aria-hidden="true" className="hidden" ref={anchorRef} />