diff --git a/js/genkit/.guides/config.json b/js/genkit/.guides/config.json new file mode 100644 index 0000000000..21d4b59a96 --- /dev/null +++ b/js/genkit/.guides/config.json @@ -0,0 +1,39 @@ +{ + "mcpServers": { + "genkit": { + "command": "genkit", + "args": ["mcp"] + } + }, + "docs": [ + { + "url": "https://genkit.dev/docs/models.md", + "name": "generate-content", + "title": "Generate Content", + "description": "how to generate content (text, structured data, images, videos) with Genkit." + }, + { + "url": "https://genkit.dev/docs/flows.md", + "name": "flows", + "title": "Using Flows to build GenAI Workflows", + "description": "how to construct strongly typed AI workflows with Genkit." + }, + { + "url": "https://genkit.dev/docs/tool-calling.md", + "name": "tool-calling", + "title": "Tool Calling", + "description": "an in-depth guide to providing tools/functions to Genkit for GenAI" + }, + { + "url": "https://genkit.dev/docs/interrupts.md", + "name": "tool-calling/interrupts", + "title": "Interrupts (Tool Calling with Human-in-the-Loop)", + "description": "instructions on how to use interrupts to provide human-in-the-loop capabilities to Genkit agents" + }, + { + "url": "https://genkit.dev/docs/context.md", + "name": "context", + "description": "how to pass context to tools and flows without exposing sensitive data to the LLM" + } + ] +} diff --git a/js/genkit/.guides/setup.md b/js/genkit/.guides/setup.md new file mode 100644 index 0000000000..4ca0078282 --- /dev/null +++ b/js/genkit/.guides/setup.md @@ -0,0 +1,30 @@ +Follow these instructions to set up Genkit in the current codebase. These instructions are general-purpose and have not been written with specific codebase knowledge, so use your best judgement when following them. + +0. Tell the user "I'm going to check out your workspace and set you up to use Genkit for GenAI workflows." +1. 
If the current workspace is empty or is a starter template, your goal will be to create a simple image generation flow that allows someone to generate an image based on a prompt and selectable style. If the current workspace is not empty, you will create a simple example flow to help get the user started. +2. Check to see if any Genkit provider plugin (such as `@genkit-ai/google-genai` or `@genkit-ai/oai-compat` or others, may start with `genkitx-*`) is installed. If not, install `@genkit-ai/google-genai`. If this is a Next.js app, install `@genkit-ai/next` as well. +3. Search the codebase for the exact string `genkit(` (remember to escape regexes properly) which would indicate that the user has already set up Genkit in the codebase. If found, no need to set it up again, tell the user "Genkit is already configured in this app." and exit this workflow. +4. Create an `ai` directory in the primary source directory of the project (this may be e.g. `src` but is project-dependent). +5. Create `{sourceDir}/ai/genkit.ts` and populate it using the example below. DO NOT add a `next` plugin to the file, ONLY add a model provider plugin to the plugins array: + +```ts +import { genkit, z } from 'genkit'; +import { googleAI } from '@genkit-ai/google-genai'; + +export const ai = genkit({ + plugins: [googleAI()], +}); + +export { z }; +``` + +6. Create `{sourceDir}/ai/tools` and `{sourceDir}/ai/flows` directories, but leave them empty for now. +7. Create `{sourceDir}/ai/index.ts` and populate it with the following (change the import to match import aliases in `tsconfig.json` as needed): + +```ts +import './genkit.js'; +// import each created flow, tool, etc. here for use in the Genkit Dev UI +``` + +8. Add a `genkit:ui` script to `package.json` that runs `npx genkit-cli start -- npx tsx {sourceDir}/ai/index.ts` (or `pnpm dlx` or `yarn dlx` for those package managers). DO NOT try to run the script now. +9. Tell the user "Genkit is now configured and ready for use." 
as setup is now complete. Also remind them to set appropriate env variables (e.g. `GEMINI_API_KEY` for Google providers). Wait for the user to prompt further before creating any specific flows. diff --git a/js/genkit/.guides/style.md b/js/genkit/.guides/style.md new file mode 100644 index 0000000000..170ec60592 --- /dev/null +++ b/js/genkit/.guides/style.md @@ -0,0 +1 @@ +- Prefer destructuring generate calls e.g. `const {text} = await ai.generate(...)` diff --git a/js/genkit/.guides/usage.md b/js/genkit/.guides/usage.md new file mode 100644 index 0000000000..c30842b3a2 --- /dev/null +++ b/js/genkit/.guides/usage.md @@ -0,0 +1,49 @@ +## Basic Example + +```ts +import { ai, z } from "@/ai/genkit"; // or wherever genkit is initialized + +const myTool = ai.defineTool({name, description, inputSchema: z.object(...)}, (input) => {...}); + +const {text} = await ai.generate({ + model: googleAI.model('gemini-2.5-flash'), // optional if default model is configured + system: "the system instructions", // optional + prompt: "the content of the prompt", + // OR, for multi-modal content + prompt: [{text: "what is this image?"}, {media: {url: "data:image/png;base64,..."}}], + tools: [myTool], +}); + +// structured output +const CharacterSchema = z.object({...}); // make sure to use .describe() on fields +const {output} = await ai.generate({ + prompt: "generate an RPG character", + output: {schema: CharacterSchema}, +}); +``` + +## Important API Clarifications + +**IMPORTANT:** This app uses Genkit v1.19 which has changed significantly from pre-1.0 versions. 
Important changes include: + +```ts +const response = await ai.generate(...); + +response.text // CORRECT 1.x syntax +response.text() // INCORRECT pre-1.0 syntax + +response.output // CORRECT 1.x syntax +response.output() // INCORRECT pre-1.0 syntax + +const {stream, response} = ai.generateStream(...); // IMPORTANT: no `await` needed +for await (const chunk of stream) { } // CORRECT 1.x syntax +for await (const chunk of stream()) { } // INCORRECT pre-1.0 syntax +await response; // CORRECT 1.x syntax +await response(); // INCORRECT pre-1.0 syntax +await ai.generate({..., model: googleAI.model('gemini-2.5-flash')}); // CORRECT 1.x syntax +await ai.generate({..., model: gemini15Pro}); // INCORRECT pre-1.0 syntax +``` + +- Use `import {z} from "genkit"` when you need Zod to get an implementation consistent with Genkit. +- When defining Zod schemas, ONLY use basic scalar, object, and array types. Use `.optional()` when needed and `.describe('...')` to add descriptions for output schemas. +- Genkit has many capabilities, make sure to read docs when you need to use them. 
diff --git a/js/plugins/express/.guides/usage.md b/js/plugins/express/.guides/usage.md new file mode 100644 index 0000000000..35c9cd7e17 --- /dev/null +++ b/js/plugins/express/.guides/usage.md @@ -0,0 +1,110 @@ +Genkit's Express integration makes it easy to expose Genkit flows as Express API endpoints: + +```ts +import express from 'express'; +import { expressHandler } from '@genkit-ai/express'; +import { simpleFlow } from './flows/simple-flow.js'; + +const app = express(); +app.use(express.json()); + +app.post('/simpleFlow', expressHandler(simpleFlow)); + +app.listen(8080); +``` + +You can also handle auth using context providers: + +```ts +import { UserFacingError } from 'genkit'; +import { ContextProvider, RequestData } from 'genkit/context'; + +const context: ContextProvider = (req: RequestData) => { + if (req.headers['authorization'] !== 'open sesame') { + throw new UserFacingError('PERMISSION_DENIED', 'not authorized'); + } + return { + auth: { + user: 'Ali Baba', + }, + }; +}; + +app.post( + '/simpleFlow', + authMiddleware, + expressHandler(simpleFlow, { context }) +); +``` + +Flows and actions exposed using the `expressHandler` function can be accessed using `genkit/beta/client` library: + +```ts +import { runFlow, streamFlow } from 'genkit/beta/client'; + +const result = await runFlow({ + url: `http://localhost:${port}/simpleFlow`, + input: 'say hello', +}); + +console.log(result); // hello +``` + +```ts +// set auth headers (when using auth policies) +const result = await runFlow({ + url: `http://localhost:${port}/simpleFlow`, + headers: { + Authorization: 'open sesame', + }, + input: 'say hello', +}); + +console.log(result); // hello +``` + +```ts +// and streamed +const result = streamFlow({ + url: `http://localhost:${port}/simpleFlow`, + input: 'say hello', +}); +for await (const chunk of result.stream) { + console.log(chunk); +} +console.log(await result.output); +``` + +You can use `startFlowServer` to quickly expose multiple flows and actions: + 
+```ts
+import { startFlowServer } from '@genkit-ai/express';
+import { genkit } from 'genkit';
+
+const ai = genkit({});
+
+export const menuSuggestionFlow = ai.defineFlow(
+  {
+    name: 'menuSuggestionFlow',
+  },
+  async (restaurantTheme) => {
+    // ...
+  }
+);
+
+startFlowServer({
+  flows: [menuSuggestionFlow],
+});
+```
+
+You can also configure the server:
+
+```ts
+startFlowServer({
+  flows: [menuSuggestionFlow],
+  port: 4567,
+  cors: {
+    origin: '*',
+  },
+});
+```
diff --git a/js/plugins/google-genai/.guides/docs/editing-images.prompt b/js/plugins/google-genai/.guides/docs/editing-images.prompt
new file mode 100644
index 0000000000..8ae25190d8
--- /dev/null
+++ b/js/plugins/google-genai/.guides/docs/editing-images.prompt
@@ -0,0 +1,106 @@
+---
+title: Edit images with `gemini-2.5-flash-image-preview` (aka "Nano Banana")
+description: read this if you need to perform sophisticated image edits such as background removal, pose matching, character replacement, or relighting on an existing image
+---
+
+The `gemini-2.5-flash-image-preview` model (also known as "Nano Banana") can perform sophisticated image edits.
+
+- You must ALWAYS add `{config: {responseModalities: ['TEXT', 'IMAGE']}}` to your `ai.generate` calls when using this model.
+
+
+```ts
+// generate an image from a prompt
+
+import { ai } from "@/ai/genkit"; // or wherever genkit is initialized
+import { googleAI } from "@genkit-ai/google-genai";
+
+const {media} = await ai.generate({
+  model: googleAI.model('gemini-2.5-flash-image-preview'),
+  config: {responseModalities: ['TEXT', 'IMAGE']},
+  prompt: "generate a picture of a unicorn wearing a space suit on the moon",
+});
+
+return media.url; // --> "data:image/png;base64,..."
+```
+
+
+
+```ts
+// edit an image with a text prompt
+
+import { ai } from "@/ai/genkit"; // or wherever genkit is initialized
+import { googleAI } from "@genkit-ai/google-genai";
+
+const {media} = await ai.generate({
+  model: googleAI.model('gemini-2.5-flash-image-preview'),
+  config: {responseModalities: ['TEXT', 'IMAGE']},
+  prompt: [
+    {text: "change the person's outfit to a banana costume"},
+    {media: {url: "https://..." /* or 'data:...' */}},
+  ],
+});
+
+return media.url; // --> "data:image/png;base64,..."
+```
+
+
+
+```ts
+// combine multiple images together
+
+import { ai } from "@/ai/genkit"; // or wherever genkit is initialized
+import { googleAI } from "@genkit-ai/google-genai";
+
+const {personImageUri, animalImageUri, sceneryImageUri} = await loadImages(...);
+
+const {media} = await ai.generate({
+  model: googleAI.model('gemini-2.5-flash-image-preview'),
+  config: {responseModalities: ['TEXT', 'IMAGE']},
+  prompt: [
+    // the model tends to match aspect ratio of the *last* image provided
+    {text: "[PERSON]:\n"},
+    {media: {url: personImageUri}},
+    {text: "\n[ANIMAL]:\n"},
+    {media: {url: animalImageUri}},
+    {text: "\n[SCENERY]:\n"},
+    // IMPORTANT: the model tends to match aspect ratio of the *last* image provided
+    {media: {url: sceneryImageUri}},
+    {text: "make an image of [PERSON] riding a giant version of [ANIMAL] with a background of [SCENERY]"},
+  ],
+});
+
+return media.url; // --> "data:image/png;base64,..."
+```
+
+
+
+```ts
+// use an annotated image to guide generation
+
+import { ai } from "@/ai/genkit"; // or wherever genkit is initialized
+import { googleAI } from "@genkit-ai/google-genai";
+
+const originalImageUri = "data:..."; // the original image
+const annotatedImageUri = "data:..."; // the image with annotations on top of it
+
+const {media} = await ai.generate({
+  model: googleAI.model('gemini-2.5-flash-image-preview'),
+  config: {responseModalities: ['TEXT', 'IMAGE']},
+  prompt: [
+
+    {text: "follow the instructions in the following annotated image:"},
+    {media: {url: annotatedImageUri}},
+    {text: "\n\napply the annotated instructions to the original image, making sure to follow the instructions of the annotations.\n\noriginal image:\n"},
+    {media: {url: originalImageUri}},
+  ],
+});
+
+return media.url; // --> "data:image/png;base64,..."
+```
+
+
+## Prompting tips for image editing
+
+- For complex edits prefer a chain of small edits to a single complex edit. Feed the output of one generation as input to the next.
+- Be specific and detailed about the edits you want to make.
+- Be clear whether added images are meant as style or subject references.
\ No newline at end of file
diff --git a/js/plugins/google-genai/.guides/docs/generating-speech.prompt b/js/plugins/google-genai/.guides/docs/generating-speech.prompt
new file mode 100644
index 0000000000..df4d9410e3
--- /dev/null
+++ b/js/plugins/google-genai/.guides/docs/generating-speech.prompt
@@ -0,0 +1,192 @@
+---
+title: Generating Speech with Gemini
+description: read this to understand how to generate realistic speech audio from a text script
+---
+
+The Google Genai plugin provides access to text-to-speech capabilities through Gemini TTS models. These models can convert text into natural-sounding speech for various applications.
+
+#### Basic Usage
+
+To generate audio using a TTS model:
+
+```ts
+import { googleAI } from '@genkit-ai/google-genai';
+import { writeFile } from 'node:fs/promises';
+import wav from 'wav'; // npm install wav && npm install -D @types/wav
+
+const ai = genkit({
+  plugins: [googleAI()],
+});
+
+const { media } = await ai.generate({
+  model: googleAI.model('gemini-2.5-flash-preview-tts'),
+  config: {
+    responseModalities: ['AUDIO'],
+    speechConfig: {
+      voiceConfig: {
+        prebuiltVoiceConfig: { voiceName: 'Algenib' },
+      },
+    },
+  },
+  prompt: 'Say that Genkit is an amazing Gen AI library',
+});
+
+if (!media) {
+  throw new Error('no media returned');
+}
+const audioBuffer = Buffer.from(media.url.substring(media.url.indexOf(',') + 1), 'base64');
+// The googleAI plugin returns raw PCM data, which we convert to WAV format.
+await writeFile('output.wav', await toWav(audioBuffer));
+
+async function toWav(pcmData: Buffer, channels = 1, rate = 24000, sampleWidth = 2): Promise<string> {
+  return new Promise((resolve, reject) => {
+    // This code depends on `wav` npm library.
+ const writer = new wav.Writer({ + channels, + sampleRate: rate, + bitDepth: sampleWidth * 8, + }); + + let bufs = [] as any[]; + writer.on('error', reject); + writer.on('data', function (d) { + bufs.push(d); + }); + writer.on('end', function () { + resolve(Buffer.concat(bufs).toString('base64')); + }); + + writer.write(pcmData); + writer.end(); + }); +} +``` + +#### Multi-speaker Audio Generation + +You can generate audio with multiple speakers, each with their own voice: + +```ts +const response = await ai.generate({ + model: googleAI.model('gemini-2.5-flash-preview-tts'), + config: { + responseModalities: ['AUDIO'], + speechConfig: { + multiSpeakerVoiceConfig: { + speakerVoiceConfigs: [ + { + speaker: 'Speaker1', + voiceConfig: { + prebuiltVoiceConfig: { voiceName: 'Algenib' }, + }, + }, + { + speaker: 'Speaker2', + voiceConfig: { + prebuiltVoiceConfig: { voiceName: 'Achernar' }, + }, + }, + ], + }, + }, + }, + prompt: `Here's the dialog: + Speaker1: "Genkit is an amazing Gen AI library!" + Speaker2: "I thought it was a framework."`, +}); +``` + +When using multi-speaker configuration, the model automatically detects speaker labels in the text (like "Speaker1:" and "Speaker2:") and applies the corresponding voice to each speaker's lines. + +#### Configuration Options + +The Gemini TTS models support various configuration options: + +##### Voice Selection + +You can choose from different pre-built voices with unique characteristics: + +```ts +speechConfig: { + voiceConfig: { + prebuiltVoiceConfig: { + voiceName: 'Algenib' // Other options: 'Achernar', 'Ankaa', etc. 
+ }, + }, +} +``` + +Full list of available voices: + +- `Zephyr`: Bright +- `Puck`: Upbeat +- `Charon`: Informative +- `Kore`: Firm +- `Fenrir`: Excitable +- `Leda`: Youthful +- `Orus`: Firm +- `Aoede`: Breezy +- `Callirrhoe`: Easy-going +- `Autonoe`: Bright +- `Enceladus`: Breathy +- `Iapetus`: Clear +- `Umbriel`: Easy-going +- `Algieba`: Smooth +- `Despina`: Smooth +- `Erinome`: Clear +- `Algenib`: Gravelly +- `Rasalgethi`: Informative +- `Laomedeia`: Upbeat +- `Achernar`: Soft +- `Alnilam`: Firm +- `Schedar`: Even +- `Gacrux`: Mature +- `Pulcherrima`: Forward +- `Achird`: Friendly +- `Zubenelgenubi`: Casual +- `Vindemiatrix`: Gentle +- `Sadachbia`: Lively +- `Sadaltager`: Knowledgeable +- `Sulafat`: Warm + +##### Speech Emphasis + +You can use markdown-style formatting in your prompt to add emphasis: + +- Bold text (`**like this**`) for stronger emphasis +- Italic text (`*like this*`) for moderate emphasis + +Example: + +```ts +prompt: 'Genkit is an **amazing** Gen AI *library*!'; +``` + +##### Advanced Speech Parameters + +For more control over the generated speech: + +```ts +speechConfig: { + voiceConfig: { + prebuiltVoiceConfig: { + voiceName: 'Algenib', + speakingRate: 1.0, // Range: 0.25 to 4.0, default is 1.0 + pitch: 0.0, // Range: -20.0 to 20.0, default is 0.0 + volumeGainDb: 0.0, // Range: -96.0 to 16.0, default is 0.0 + }, + }, +} +``` + +- `speakingRate`: Controls the speed of speech (higher values = faster speech) +- `pitch`: Adjusts the pitch of the voice (higher values = higher pitch) +- `volumeGainDb`: Controls the volume (higher values = louder) + +For more detailed information about the Gemini TTS models and their configuration options, see the [Google AI Speech Generation documentation](https://ai.google.dev/gemini-api/docs/speech-generation). 
+ +## Next Steps + +- Learn about [generating content](/docs/models) to understand how to use these models effectively +- Explore [creating flows](/docs/flows) to build structured AI workflows +- To use the Gemini API at enterprise scale or leverage Vertex vector search and Model Garden, see the [Vertex AI plugin](/docs/integrations/vertex-ai) \ No newline at end of file diff --git a/js/plugins/google-genai/.guides/docs/search-and-urls.prompt b/js/plugins/google-genai/.guides/docs/search-and-urls.prompt new file mode 100644 index 0000000000..1eae75b95b --- /dev/null +++ b/js/plugins/google-genai/.guides/docs/search-and-urls.prompt @@ -0,0 +1,34 @@ +--- +title: Search Grounding and URL Context +description: read this to understand how to ground results on Google search or automatically ingest URLs in prompts to understand their contents +--- + +When using Gemini models, you can add configuration to automatically ground the prompt in Google search: + +```ts +const {text} = await ai.generate({ + model: googleAI.model('gemini-2.5-flash'), + // this includes the "built-in" search grounding tool + config: { + tools: [ + {googleSearch: {}}, + ] + } +}) +``` + +If your prompt contains URLs that you want the model to read and understand, you can add the URL context tool: + +```ts +const {text} = await ai.generate({ + model: googleAI.model('gemini-2.5-flash'), + // this includes the "built-in" url fetching tool + config: { + tools: [ + {urlContext: {}}, + ] + } +}) +``` + +You can use either or both of these tools to improve the "groundedness" of your generated model responses. \ No newline at end of file diff --git a/js/plugins/next/.guides/usage.prompt b/js/plugins/next/.guides/usage.prompt new file mode 100644 index 0000000000..4888dbd066 --- /dev/null +++ b/js/plugins/next/.guides/usage.prompt @@ -0,0 +1,104 @@ +- DO NOT add anything to the `plugins` array for the Genkit Next.js plugin. 
+- Use the Genkit Next.js plugin and its `appRoute` helper to provide API endpoints for AI actions
+- When importing a flow from code that runs in the browser, ONLY import the type
+- Flows can be streaming or non-streaming
+
+
+```ts
+// src/ai/flows/generate-animal.ts
+import { ai, z } from "@/ai/genkit";
+
+export const AnimalSchema = z.object({
+// ... detailed output schema with descriptions
+});
+export type Animal = z.infer<typeof AnimalSchema>;
+
+export const generateAnimal = ai.defineFlow({
+  name: 'generateAnimal',
+  inputSchema: z.string().describe('a text description of the type of animal to create'),
+  outputSchema: AnimalSchema,
+}, async (input) => {
+  const {output: animal} = await ai.generate(/* fill in generate details */);
+  return animal;
+});
+
+// src/app/api/generate-animal/route.ts
+import { generateAnimal } from "@/ai/flows/generate-animal";
+import { appRoute } from '@genkit-ai/next';
+
+export const POST = appRoute(generateAnimal);
+
+// src/app/page.tsx
+import type { generateAnimal, Animal } from "@/ai/flows/generate-animal"; // import flow type for strong typing of runFlow
+import { runFlow } from "@genkit-ai/next/client";
+
+export default function Page() {
+  const [animal, setAnimal] = useState<Animal | null>(null);
+  const handleGenerateAnimal = async (input: string) => {
+    const animal = await runFlow<typeof generateAnimal>({
+      url: '/api/generate-animal', // use the URL defined by your route.ts
+      input,
+    });
+
+    setAnimal(animal);
+  }
+  return <>{/* ... */}</>;
+}
+```
+
+
+For streaming:
+
+
+```ts
+// src/ai/flows/generate-animal.ts
+export const generateAnimal = ai.defineFlow({
+  name: 'generateAnimal',
+  // inputSchema, outputSchema -- same as non-streaming
+  streamSchema: AnimalSchema, // can be same or different as output schema
+}, async (input, {sendChunk}) => {
+  const {stream, response} = ai.generateStream(/* fill in generate details */);
+  for await (const chunk of stream) {
+    sendChunk(chunk.output);
+  }
+  const {output} = await response;
+  return output;
+});
+
+// src/app/page.tsx
+import { streamFlow } from '@genkit-ai/next/client';
+import type { generateAnimal } from "@/ai/flows/generate-animal";
+
+// ... Page ...
+  const {stream, output} = streamFlow<typeof generateAnimal>({
+    url: '/api/generate-animal',
+    input,
+  });
+  for await (const chunk of stream) {
+    console.log(chunk); // do something with the chunk
+  }
+  console.log(await output); // final output is a promise, must be awaited
+// ... /Page ...
+```
+
+
+Auth: `appRoute` accepts an optional context provider function which can handle auth headers
+and return a parsed context object which will be made available to the flow and child
+actions.
+ + +```ts +// src/app/api/generate-animal/route.ts +import { generateAnimal } from "@/ai/flows/generate-animal"; +import { appRoute } from '@genkit-ai/next'; + +export const POST = appRoute(generateAnimal, { + contextProvider: async (req) => { + // parseAuthHeader function can throw a user facing error like: + // import { UserFacingError } from 'genkit'; + // throw new UserFacingError('PERMISSION_DENIED', 'Permission Denied'); + return {auth: await parseAuthHeader(req.headers)} + } +}); +``` + \ No newline at end of file diff --git a/js/plugins/next/README.md b/js/plugins/next/README.md index 0037b22591..6c4bff9294 100644 --- a/js/plugins/next/README.md +++ b/js/plugins/next/README.md @@ -20,15 +20,15 @@ const simpleFlow = ai.defineFlow( ```ts // /app/api/simpleFlow/route.ts import { simpleFlow } from '@/genkit/simpleFlow'; -import { appRoute } from '@genkit-ai/nextjs'; +import { appRoute } from '@genkit-ai/next'; export const POST = appRoute(simpleFlow); ``` -APIs can be called with the generic `genkit/beta/client` library, or `@genkit-ai/nextjs/client` +APIs can be called with the generic `genkit/beta/client` library, or `@genkit-ai/next/client` ```ts -import { runFlow, streamFlow } from '@genkit-ai/nextjs/client'; +import { runFlow, streamFlow } from '@genkit-ai/next/client'; import { simpleFlow } from '@/genkit/simpleFlow'; const result = await runFlow({ @@ -50,14 +50,14 @@ const result = await runFlow({ console.log(result); // hello // and streamed -const result = streamFlow({ +const { stream, output } = streamFlow({ url: '/api/simpleFlow', input: 'say hello', }); -for await (const chunk of result.stream()) { +for await (const chunk of stream) { console.log(chunk.output); } -console.log(await result.output()); +console.log(await output); // output is a promise, must be awaited ``` The sources for this package are in the main [Genkit](https://github.com/firebase/genkit) repo. Please file issues and pull requests against that repo. 
diff --git a/js/testapps/next/package.json b/js/testapps/next/package.json index b6640a60a9..f977de3efb 100644 --- a/js/testapps/next/package.json +++ b/js/testapps/next/package.json @@ -2,7 +2,7 @@ "name": "nextjs-sample", "type": "module", "version": "0.0.1-dev.1", - "description": "Sample app to test @genkit-ai/nextjs", + "description": "Sample app to test @genkit-ai/next", "main": "lib/index.js", "scripts": { "vendor": "cd ../../plugins/next && pnpm pack --pack-destination ../../testapps/next && cd - && pnpm install ./genkit-ai-next*.tgz --filter nextjs-sample",