From c3ec037c78827e4e0758b159e0fcb30e5109069d Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 15 Apr 2025 12:35:05 -0400 Subject: [PATCH 01/16] feat(vertexai): Migrate to `GenAI` and add GoogleAI support --- .changeset/tall-zoos-stare.md | 6 + common/api-review/vertexai.api.md | 137 ++++-- docs-devsite/_toc.yaml | 14 +- docs-devsite/vertexai.genai.md | 64 +++ ...ertexaierror.md => vertexai.genaierror.md} | 24 +- docs-devsite/vertexai.genaimodel.md | 37 ++ docs-devsite/vertexai.genaioptions.md | 35 ++ docs-devsite/vertexai.generativemodel.md | 10 +- .../vertexai.imagengenerationconfig.md | 2 + docs-devsite/vertexai.imagenmodel.md | 12 +- docs-devsite/vertexai.md | 299 +++++++++++-- docs-devsite/vertexai.modelparams.md | 2 +- docs-devsite/vertexai.requestoptions.md | 2 +- docs-devsite/vertexai.safetysetting.md | 4 +- docs-devsite/vertexai.schemashared.md | 4 +- docs-devsite/vertexai.vertexai.md | 44 -- docs-devsite/vertexai.vertexaimodel.md | 66 --- packages/firebase/package.json | 12 + packages/vertexai/.eslintrc.js | 3 +- packages/vertexai/src/api.test.ts | 126 +++--- packages/vertexai/src/api.ts | 150 ++++++- .../src/backwards-compatbility.test.ts | 85 ++++ packages/vertexai/src/constants.ts | 9 + packages/vertexai/src/errors.ts | 20 +- packages/vertexai/src/googleAIMappers.test.ts | 400 ++++++++++++++++++ packages/vertexai/src/googleAIMappers.ts | 235 ++++++++++ packages/vertexai/src/helpers.test.ts | 140 ++++++ packages/vertexai/src/helpers.ts | 86 ++++ packages/vertexai/src/index.node.ts | 28 +- packages/vertexai/src/index.ts | 27 +- .../src/methods/chat-session-helpers.ts | 34 +- .../vertexai/src/methods/chat-session.test.ts | 4 +- .../vertexai/src/methods/count-tokens.test.ts | 41 +- packages/vertexai/src/methods/count-tokens.ts | 11 +- .../src/methods/generate-content.test.ts | 95 ++++- .../vertexai/src/methods/generate-content.ts | 31 +- ...exai-model.test.ts => genai-model.test.ts} | 51 +-- .../{vertexai-model.ts => genai-model.ts} | 90 ++-- .../src/models/generative-model.test.ts | 20 +- .../vertexai/src/models/generative-model.ts | 10 +- .../vertexai/src/models/imagen-model.test.ts | 22 +- packages/vertexai/src/models/imagen-model.ts | 14 +- packages/vertexai/src/models/index.ts | 2 +- packages/vertexai/src/public-types.ts | 126 +++++- .../vertexai/src/requests/request-helpers.ts | 12 +- .../vertexai/src/requests/request.test.ts | 72 ++-- packages/vertexai/src/requests/request.ts | 67 +-- .../vertexai/src/requests/response-helpers.ts | 28 +- .../src/requests/schema-builder.test.ts | 4 +- .../vertexai/src/requests/schema-builder.ts | 8 +- .../src/requests/stream-reader.test.ts | 44 +- .../vertexai/src/requests/stream-reader.ts | 58 ++- packages/vertexai/src/service.test.ts | 18 +- packages/vertexai/src/service.ts | 18 +- packages/vertexai/src/types/enums.ts | 11 +- packages/vertexai/src/types/error.ts | 9 +- packages/vertexai/src/types/googleAI/index.ts | 19 + .../vertexai/src/types/googleAI/requests.ts | 29 ++ .../vertexai/src/types/googleAI/responses.ts | 46 ++ .../vertexai/src/types/imagen/requests.ts | 3 + packages/vertexai/src/types/internal.ts | 9 +- packages/vertexai/src/types/requests.ts | 5 + packages/vertexai/src/types/responses.ts | 10 +- packages/vertexai/src/types/schema.ts | 5 +- packages/vertexai/test-utils/mock-response.ts | 3 + 65 files changed, 2501 insertions(+), 611 deletions(-) create mode 100644 .changeset/tall-zoos-stare.md create mode 100644 docs-devsite/vertexai.genai.md rename docs-devsite/{vertexai.vertexaierror.md => vertexai.genaierror.md} 
(53%) create mode 100644 docs-devsite/vertexai.genaimodel.md create mode 100644 docs-devsite/vertexai.genaioptions.md delete mode 100644 docs-devsite/vertexai.vertexai.md delete mode 100644 docs-devsite/vertexai.vertexaimodel.md create mode 100644 packages/vertexai/src/backwards-compatbility.test.ts create mode 100644 packages/vertexai/src/googleAIMappers.test.ts create mode 100644 packages/vertexai/src/googleAIMappers.ts create mode 100644 packages/vertexai/src/helpers.test.ts create mode 100644 packages/vertexai/src/helpers.ts rename packages/vertexai/src/models/{vertexai-model.test.ts => genai-model.test.ts} (65%) rename packages/vertexai/src/models/{vertexai-model.ts => genai-model.ts} (61%) create mode 100644 packages/vertexai/src/types/googleAI/index.ts create mode 100644 packages/vertexai/src/types/googleAI/requests.ts create mode 100644 packages/vertexai/src/types/googleAI/responses.ts diff --git a/.changeset/tall-zoos-stare.md b/.changeset/tall-zoos-stare.md new file mode 100644 index 00000000000..f4e7601b4a4 --- /dev/null +++ b/.changeset/tall-zoos-stare.md @@ -0,0 +1,6 @@ +--- +'firebase': minor +'@firebase/vertexai': minor +--- + +Add support for the Google AI API, enabling usage in a free tier, and migrate from `VertexAI` naming to `GenAI`. diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index e7f00c2f4e0..66012360050 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -18,6 +18,18 @@ export class ArraySchema extends Schema { toJSON(): SchemaRequest; } +// @public +export type Backend = GoogleAIBackend | VertexAIBackend; + +// @public +export const BackendType: { + readonly VERTEX_AI: "VERTEX_AI"; + readonly GOOGLE_AI: "GOOGLE_AI"; +}; + +// @public +export type BackendType = (typeof BackendType)[keyof typeof BackendType]; + // @public export interface BaseParams { // (undocumented) @@ -239,6 +251,60 @@ export interface FunctionResponsePart { text?: never; } +// @public +export interface GenAI { + app: FirebaseApp; + backend: Backend; + // @deprecated + location: string; +} + +// @public +export class GenAIError extends FirebaseError { + constructor(code: GenAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); + // (undocumented) + readonly code: GenAIErrorCode; + // (undocumented) + readonly customErrorData?: CustomErrorData | undefined; +} + +// @public +const enum GenAIErrorCode { + API_NOT_ENABLED = "api-not-enabled", + ERROR = "error", + FETCH_ERROR = "fetch-error", + INVALID_CONTENT = "invalid-content", + INVALID_SCHEMA = "invalid-schema", + NO_API_KEY = "no-api-key", + NO_APP_ID = "no-app-id", + NO_MODEL = "no-model", + NO_PROJECT_ID = "no-project-id", + PARSE_FAILED = "parse-failed", + REQUEST_ERROR = "request-error", + RESPONSE_ERROR = "response-error", + UNSUPPORTED = "unsupported" +} + +export { GenAIErrorCode } + +export { GenAIErrorCode as VertexAIErrorCode } + +// @public +export abstract class GenAIModel { + // @internal + protected constructor(genAI: GenAI, modelName: string); + // @internal (undocumented) + protected _apiSettings: ApiSettings; + readonly model: string; + // @internal + static normalizeModelName(modelName: string, backendType: BackendType): string; + } + +// @public +export interface GenAIOptions { + backend: Backend; +} + // @public export interface GenerateContentCandidate { // (undocumented) @@ -323,8 +389,8 @@ export interface GenerativeContentBlob { } // @public -export class GenerativeModel extends VertexAIModel { - constructor(vertexAI: 
VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions); +export class GenerativeModel extends GenAIModel { + constructor(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions); countTokens(request: CountTokensRequest | string | Array): Promise; generateContent(request: GenerateContentRequest | string | Array): Promise; generateContentStream(request: GenerateContentRequest | string | Array): Promise; @@ -344,14 +410,25 @@ export class GenerativeModel extends VertexAIModel { } // @public -export function getGenerativeModel(vertexAI: VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export function getGenAI(app?: FirebaseApp, options?: GenAIOptions): GenAI; + +// @public +export function getGenerativeModel(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; // @beta -export function getImagenModel(vertexAI: VertexAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; +export function getImagenModel(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; // @public export function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI; +// @public +export type GoogleAIBackend = { + backendType: typeof BackendType.GOOGLE_AI; +}; + +// @public +export function googleAIBackend(): GoogleAIBackend; + // @public @deprecated (undocumented) export interface GroundingAttribution { // (undocumented) @@ -374,7 +451,7 @@ export interface GroundingMetadata { webSearchQueries?: string[]; } -// @public (undocumented) +// @public export enum HarmBlockMethod { PROBABILITY = "PROBABILITY", SEVERITY = "SEVERITY" @@ -413,7 +490,8 @@ export enum HarmSeverity { HARM_SEVERITY_HIGH = "HARM_SEVERITY_HIGH", HARM_SEVERITY_LOW = "HARM_SEVERITY_LOW", HARM_SEVERITY_MEDIUM = "HARM_SEVERITY_MEDIUM", - HARM_SEVERITY_NEGLIGIBLE = "HARM_SEVERITY_NEGLIGIBLE" + HARM_SEVERITY_NEGLIGIBLE = "HARM_SEVERITY_NEGLIGIBLE", + HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED" } // @beta @@ -461,8 +539,8 @@ export interface ImagenInlineImage { } // @beta -export class ImagenModel extends VertexAIModel { - constructor(vertexAI: VertexAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); +export class ImagenModel extends GenAIModel { + constructor(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); generateImages(prompt: string): Promise>; // @internal generateImagesGCS(prompt: string, gcsURI: string): Promise>; @@ -627,7 +705,6 @@ export interface SafetyRating { export interface SafetySetting { // (undocumented) category: HarmCategory; - // (undocumented) method?: HarmBlockMethod; // (undocumented) threshold: HarmBlockThreshold; @@ -779,46 +856,22 @@ export interface UsageMetadata { } // @public -export interface VertexAI { - app: FirebaseApp; - // (undocumented) +export type VertexAI = GenAI; + +// @public +export type VertexAIBackend = { + backendType: typeof BackendType.VERTEX_AI; location: string; -} +}; // @public -export class VertexAIError extends FirebaseError { - constructor(code: VertexAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); - // (undocumented) - readonly code: VertexAIErrorCode; - // (undocumented) - readonly customErrorData?: CustomErrorData | undefined; -} +export function vertexAIBackend(location?: string): VertexAIBackend; // @public -export const enum VertexAIErrorCode { - API_NOT_ENABLED = "api-not-enabled", - ERROR = "error", - 
FETCH_ERROR = "fetch-error", - INVALID_CONTENT = "invalid-content", - INVALID_SCHEMA = "invalid-schema", - NO_API_KEY = "no-api-key", - NO_APP_ID = "no-app-id", - NO_MODEL = "no-model", - NO_PROJECT_ID = "no-project-id", - PARSE_FAILED = "parse-failed", - REQUEST_ERROR = "request-error", - RESPONSE_ERROR = "response-error" -} +export const VertexAIError: typeof GenAIError; // @public -export abstract class VertexAIModel { - // @internal - protected constructor(vertexAI: VertexAI, modelName: string); - // @internal (undocumented) - protected _apiSettings: ApiSettings; - readonly model: string; - static normalizeModelName(modelName: string): string; -} +export const VertexAIModel: typeof GenAIModel; // @public export interface VertexAIOptions { diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml index 665222edb9d..19ed87f1e54 100644 --- a/docs-devsite/_toc.yaml +++ b/docs-devsite/_toc.yaml @@ -516,6 +516,14 @@ toc: path: /docs/reference/js/vertexai.functionresponse.md - title: FunctionResponsePart path: /docs/reference/js/vertexai.functionresponsepart.md + - title: GenAI + path: /docs/reference/js/vertexai.genai.md + - title: GenAIError + path: /docs/reference/js/vertexai.genaierror.md + - title: GenAIModel + path: /docs/reference/js/vertexai.genaimodel.md + - title: GenAIOptions + path: /docs/reference/js/vertexai.genaioptions.md - title: GenerateContentCandidate path: /docs/reference/js/vertexai.generatecontentcandidate.md - title: GenerateContentRequest @@ -598,12 +606,6 @@ toc: path: /docs/reference/js/vertexai.toolconfig.md - title: UsageMetadata path: /docs/reference/js/vertexai.usagemetadata.md - - title: VertexAI - path: /docs/reference/js/vertexai.vertexai.md - - title: VertexAIError - path: /docs/reference/js/vertexai.vertexaierror.md - - title: VertexAIModel - path: /docs/reference/js/vertexai.vertexaimodel.md - title: VertexAIOptions path: /docs/reference/js/vertexai.vertexaioptions.md - title: VideoMetadata diff --git a/docs-devsite/vertexai.genai.md b/docs-devsite/vertexai.genai.md new file mode 100644 index 00000000000..d8ce617941a --- /dev/null +++ b/docs-devsite/vertexai.genai.md @@ -0,0 +1,64 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# GenAI interface +An instance of the Firebase GenAI SDK. + +Do not create this instance directly. Instead, use [getGenAI()](./vertexai.md#getgenai_65c48ee). + +Signature: + +```typescript +export interface GenAI +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [app](./vertexai.genai.md#genaiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [GenAI](./vertexai.genai.md#genai_interface) instance is associated with. | +| [backend](./vertexai.genai.md#genaibackend) | [Backend](./vertexai.md#backend) | A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. | +| [location](./vertexai.genai.md#genailocation) | string | The location configured for this GenAI service instance, relevant for Vertex AI backends. | + +## GenAI.app + +The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [GenAI](./vertexai.genai.md#genai_interface) instance is associated with. 
+ +Signature: + +```typescript +app: FirebaseApp; +``` + +## GenAI.backend + +A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. + +Signature: + +```typescript +backend: Backend; +``` + +## GenAI.location + +> Warning: This API is now obsolete. +> +> use `GenAI.backend.location` instead. +> + +The location configured for this GenAI service instance, relevant for Vertex AI backends. + +Signature: + +```typescript +location: string; +``` diff --git a/docs-devsite/vertexai.vertexaierror.md b/docs-devsite/vertexai.genaierror.md similarity index 53% rename from docs-devsite/vertexai.vertexaierror.md rename to docs-devsite/vertexai.genaierror.md index 31f527e59b3..eed0a54d264 100644 --- a/docs-devsite/vertexai.vertexaierror.md +++ b/docs-devsite/vertexai.genaierror.md @@ -9,13 +9,13 @@ overwritten. Changes should be made in the source code at https://github.com/firebase/firebase-js-sdk {% endcomment %} -# VertexAIError class +# GenAIError class Error class for the Vertex AI in Firebase SDK. Signature: ```typescript -export declare class VertexAIError extends FirebaseError +export declare class GenAIError extends FirebaseError ``` Extends: [FirebaseError](./util.firebaseerror.md#firebaseerror_class) @@ -23,42 +23,42 @@ export declare class VertexAIError extends FirebaseError | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(code, message, customErrorData)](./vertexai.vertexaierror.md#vertexaierrorconstructor) | | Constructs a new instance of the VertexAIError class. | +| [(constructor)(code, message, customErrorData)](./vertexai.genaierror.md#genaierrorconstructor) | | Constructs a new instance of the GenAIError class. | ## Properties | Property | Modifiers | Type | Description | | --- | --- | --- | --- | -| [code](./vertexai.vertexaierror.md#vertexaierrorcode) | | [VertexAIErrorCode](./vertexai.md#vertexaierrorcode) | | -| [customErrorData](./vertexai.vertexaierror.md#vertexaierrorcustomerrordata) | | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | | +| [code](./vertexai.genaierror.md#genaierrorcode) | | [GenAIErrorCode](./vertexai.md#genaierrorcode) | | +| [customErrorData](./vertexai.genaierror.md#genaierrorcustomerrordata) | | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | | -## VertexAIError.(constructor) +## GenAIError.(constructor) -Constructs a new instance of the `VertexAIError` class. +Constructs a new instance of the `GenAIError` class. Signature: ```typescript -constructor(code: VertexAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); +constructor(code: GenAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| code | [VertexAIErrorCode](./vertexai.md#vertexaierrorcode) | The error code from [VertexAIErrorCode](./vertexai.md#vertexaierrorcode). | +| code | [GenAIErrorCode](./vertexai.md#genaierrorcode) | The error code from [GenAIErrorCode](./vertexai.md#genaierrorcode). | | message | string | A human-readable message describing the error. | | customErrorData | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | Optional error data. 
| -## VertexAIError.code +## GenAIError.code Signature: ```typescript -readonly code: VertexAIErrorCode; +readonly code: GenAIErrorCode; ``` -## VertexAIError.customErrorData +## GenAIError.customErrorData Signature: diff --git a/docs-devsite/vertexai.genaimodel.md b/docs-devsite/vertexai.genaimodel.md new file mode 100644 index 00000000000..3d744a77e28 --- /dev/null +++ b/docs-devsite/vertexai.genaimodel.md @@ -0,0 +1,37 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# GenAIModel class +Base class for Vertex AI in Firebase model APIs. + +The constructor for this class is marked as internal. Third-party code should not call the constructor directly or create subclasses that extend the `GenAIModel` class. + +Signature: + +```typescript +export declare abstract class GenAIModel +``` + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [model](./vertexai.genaimodel.md#genaimodelmodel) | | string | The fully qualified model resource name to use for generating images (for example, publishers/google/models/imagen-3.0-generate-002). | + +## GenAIModel.model + +The fully qualified model resource name to use for generating images (for example, `publishers/google/models/imagen-3.0-generate-002`). + +Signature: + +```typescript +readonly model: string; +``` diff --git a/docs-devsite/vertexai.genaioptions.md b/docs-devsite/vertexai.genaioptions.md new file mode 100644 index 00000000000..d7f5b499f67 --- /dev/null +++ b/docs-devsite/vertexai.genaioptions.md @@ -0,0 +1,35 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# GenAIOptions interface +Options interface for initializing the GenAI service using [getGenAI()](./vertexai.md#getgenai_65c48ee). + +Signature: + +```typescript +export interface GenAIOptions +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [backend](./vertexai.genaioptions.md#genaioptionsbackend) | [Backend](./vertexai.md#backend) | The backend configuration to use for the GenAI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. | + +## GenAIOptions.backend + +The backend configuration to use for the GenAI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. + +Signature: + +```typescript +backend: Backend; +``` diff --git a/docs-devsite/vertexai.generativemodel.md b/docs-devsite/vertexai.generativemodel.md index e4a238b0af5..2cbb56958b1 100644 --- a/docs-devsite/vertexai.generativemodel.md +++ b/docs-devsite/vertexai.generativemodel.md @@ -15,15 +15,15 @@ Class for generative model APIs. 
Signature: ```typescript -export declare class GenerativeModel extends VertexAIModel +export declare class GenerativeModel extends GenAIModel ``` -Extends: [VertexAIModel](./vertexai.vertexaimodel.md#vertexaimodel_class) +Extends: [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) ## Constructors | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(vertexAI, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(genAI, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | ## Properties @@ -52,14 +52,14 @@ Constructs a new instance of the `GenerativeModel` class Signature: ```typescript -constructor(vertexAI: VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| vertexAI | [VertexAI](./vertexai.vertexai.md#vertexai_interface) | | +| genAI | [GenAI](./vertexai.genai.md#genai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | diff --git a/docs-devsite/vertexai.imagengenerationconfig.md b/docs-devsite/vertexai.imagengenerationconfig.md index b6785b9b2bb..8c452bcd116 100644 --- a/docs-devsite/vertexai.imagengenerationconfig.md +++ b/docs-devsite/vertexai.imagengenerationconfig.md @@ -44,6 +44,8 @@ If set to `true`, an invisible SynthID watermark is embedded in generate For Imagen 3 models, the default value is `true`; see the addWatermark documentation for more details. +In Google AI, the default value is true, and it cannot be turned off. + Signature: ```typescript diff --git a/docs-devsite/vertexai.imagenmodel.md b/docs-devsite/vertexai.imagenmodel.md index ed40dc8f578..b6c29dc5d81 100644 --- a/docs-devsite/vertexai.imagenmodel.md +++ b/docs-devsite/vertexai.imagenmodel.md @@ -20,15 +20,15 @@ This class provides methods for generating images using the Imagen model. Signature: ```typescript -export declare class ImagenModel extends VertexAIModel +export declare class ImagenModel extends GenAIModel ``` -Extends: [VertexAIModel](./vertexai.vertexaimodel.md#vertexaimodel_class) +Extends: [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) ## Constructors | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(vertexAI, modelParams, requestOptions)](./vertexai.imagenmodel.md#imagenmodelconstructor) | | (Public Preview) Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class. | +| [(constructor)(genAI, modelParams, requestOptions)](./vertexai.imagenmodel.md#imagenmodelconstructor) | | (Public Preview) Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class. 
| ## Properties @@ -54,14 +54,14 @@ Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenm Signature: ```typescript -constructor(vertexAI: VertexAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); +constructor(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| vertexAI | [VertexAI](./vertexai.vertexai.md#vertexai_interface) | An instance of the Vertex AI in Firebase SDK. | +| genAI | [GenAI](./vertexai.genai.md#genai_interface) | A [GenAI](./vertexai.genai.md#genai_interface) instance. | | modelParams | [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface) | Parameters to use when making requests to Imagen. | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) \| undefined | Additional options to use when making requests. | @@ -142,7 +142,7 @@ If the request to generate images fails. This happens if the prompt is blocked. ```javascript const imagen = new ImagenModel( - vertexAI, + genAI, { model: 'imagen-3.0-generate-002' } diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md index f67254eef20..1b93328851e 100644 --- a/docs-devsite/vertexai.md +++ b/docs-devsite/vertexai.md @@ -17,10 +17,15 @@ The Vertex AI in Firebase Web SDK. | Function | Description | | --- | --- | | function(app, ...) | -| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.vertexai.md#vertexai_interface) instance for the given app. | -| function(vertexAI, ...) | -| [getGenerativeModel(vertexAI, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_e3037c9) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | -| [getImagenModel(vertexAI, modelParams, requestOptions)](./vertexai.md#getimagenmodel_812c375) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | +| [getGenAI(app, options)](./vertexai.md#getgenai_65c48ee) | Returns the default [GenAI](./vertexai.genai.md#genai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. | +| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | +| function() | +| [googleAIBackend()](./vertexai.md#googleaibackend) | Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. | +| function(genAI, ...) | +| [getGenerativeModel(genAI, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_e3ccf80) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | +| [getImagenModel(genAI, modelParams, requestOptions)](./vertexai.md#getimagenmodel_bffbd6b) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | +| function(location, ...) | +| [vertexAIBackend(location)](./vertexai.md#vertexaibackend_d0a4534) | Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. 
| ## Classes @@ -29,6 +34,8 @@ The Vertex AI in Firebase Web SDK. | [ArraySchema](./vertexai.arrayschema.md#arrayschema_class) | Schema class for "array" types. The items param should refer to the type of item that can be a member of the array. | | [BooleanSchema](./vertexai.booleanschema.md#booleanschema_class) | Schema class for "boolean" types. | | [ChatSession](./vertexai.chatsession.md#chatsession_class) | ChatSession class that enables sending chat messages and stores history of sent and received messages so far. | +| [GenAIError](./vertexai.genaierror.md#genaierror_class) | Error class for the Vertex AI in Firebase SDK. | +| [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) | Base class for Vertex AI in Firebase model APIs. | | [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) | Class for generative model APIs. | | [ImagenImageFormat](./vertexai.imagenimageformat.md#imagenimageformat_class) | (Public Preview) Defines the image format for images generated by Imagen.Use this class to specify the desired format (JPEG or PNG) and compression quality for images generated by Imagen. This is typically included as part of [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface). | | [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) | (Public Preview) Class for Imagen model APIs.This class provides methods for generating images using the Imagen model. | @@ -37,8 +44,6 @@ The Vertex AI in Firebase Web SDK. | [ObjectSchema](./vertexai.objectschema.md#objectschema_class) | Schema class for "object" types. The properties param must be a map of Schema objects. | | [Schema](./vertexai.schema.md#schema_class) | Parent class encompassing all Schema types, with static methods that allow building specific Schema types. This class can be converted with JSON.stringify() into a JSON string accepted by Vertex AI REST endpoints. (This string conversion is automatically done when calling SDK methods.) | | [StringSchema](./vertexai.stringschema.md#stringschema_class) | Schema class for "string" types. Can be used with or without enum values. | -| [VertexAIError](./vertexai.vertexaierror.md#vertexaierror_class) | Error class for the Vertex AI in Firebase SDK. | -| [VertexAIModel](./vertexai.vertexaimodel.md#vertexaimodel_class) | Base class for Vertex AI in Firebase model APIs. | ## Enumerations @@ -47,7 +52,8 @@ The Vertex AI in Firebase Web SDK. | [BlockReason](./vertexai.md#blockreason) | Reason that a prompt was blocked. | | [FinishReason](./vertexai.md#finishreason) | Reason that a candidate finished. | | [FunctionCallingMode](./vertexai.md#functioncallingmode) | | -| [HarmBlockMethod](./vertexai.md#harmblockmethod) | | +| [GenAIErrorCode](./vertexai.md#genaierrorcode) | Standardized error codes that [GenAIError](./vertexai.genaierror.md#genaierror_class) can have. | +| [HarmBlockMethod](./vertexai.md#harmblockmethod) | This property is not supported in Google AI. | | [HarmBlockThreshold](./vertexai.md#harmblockthreshold) | Threshold above which a prompt or candidate will be blocked. | | [HarmCategory](./vertexai.md#harmcategory) | Harm categories that would cause prompts or candidates to be blocked. | | [HarmProbability](./vertexai.md#harmprobability) | Probability that a prompt or candidate matches a harm category. | @@ -57,7 +63,6 @@ The Vertex AI in Firebase Web SDK. 
| [ImagenSafetyFilterLevel](./vertexai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. | | [Modality](./vertexai.md#modality) | Content part modality. | | [SchemaType](./vertexai.md#schematype) | Contains the list of OpenAPI data types as defined by the [OpenAPI specification](https://swagger.io/docs/specification/data-models/data-types/) | -| [VertexAIErrorCode](./vertexai.md#vertexaierrorcode) | Standardized error codes that [VertexAIError](./vertexai.vertexaierror.md#vertexaierror_class) can have. | ## Interfaces @@ -82,6 +87,8 @@ The Vertex AI in Firebase Web SDK. | [FunctionDeclarationsTool](./vertexai.functiondeclarationstool.md#functiondeclarationstool_interface) | A FunctionDeclarationsTool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. | | [FunctionResponse](./vertexai.functionresponse.md#functionresponse_interface) | The result output from a [FunctionCall](./vertexai.functioncall.md#functioncall_interface) that contains a string representing the [FunctionDeclaration.name](./vertexai.functiondeclaration.md#functiondeclarationname) and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall](./vertexai.functioncall.md#functioncall_interface) made based on model prediction. | | [FunctionResponsePart](./vertexai.functionresponsepart.md#functionresponsepart_interface) | Content part interface if the part represents [FunctionResponse](./vertexai.functionresponse.md#functionresponse_interface). | +| [GenAI](./vertexai.genai.md#genai_interface) | An instance of the Firebase GenAI SDK.Do not create this instance directly. Instead, use [getGenAI()](./vertexai.md#getgenai_65c48ee). | +| [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) | Options interface for initializing the GenAI service using [getGenAI()](./vertexai.md#getgenai_65c48ee). | | [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | A candidate returned as part of a [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface). | | [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) | Request sent through [GenerativeModel.generateContent()](./vertexai.generativemodel.md#generativemodelgeneratecontent) | | [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface) | Individual response from [GenerativeModel.generateContent()](./vertexai.generativemodel.md#generativemodelgeneratecontent) and [GenerativeModel.generateContentStream()](./vertexai.generativemodel.md#generativemodelgeneratecontentstream). generateContentStream() will return one in each chunk until the stream is done. 
| @@ -99,10 +106,10 @@ The Vertex AI in Firebase Web SDK. | [ImagenSafetySettings](./vertexai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. | | [InlineDataPart](./vertexai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. | | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | -| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9). | +| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). | | [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. | | [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | -| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9). | +| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). | | [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | | | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. | @@ -115,7 +122,6 @@ The Vertex AI in Firebase Web SDK. | [TextPart](./vertexai.textpart.md#textpart_interface) | Content part interface if the part represents a text string. | | [ToolConfig](./vertexai.toolconfig.md#toolconfig_interface) | Tool config. This config is shared for all tools provided in the request. | | [UsageMetadata](./vertexai.usagemetadata.md#usagemetadata_interface) | Usage metadata about a [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface). | -| [VertexAI](./vertexai.vertexai.md#vertexai_interface) | An instance of the Vertex AI in Firebase SDK. | | [VertexAIOptions](./vertexai.vertexaioptions.md#vertexaioptions_interface) | Options when initializing the Vertex AI in Firebase SDK. | | [VideoMetadata](./vertexai.videometadata.md#videometadata_interface) | Describes the input video content. | | [WebAttribution](./vertexai.webattribution.md#webattribution_interface) | | @@ -124,22 +130,79 @@ The Vertex AI in Firebase Web SDK. | Variable | Description | | --- | --- | +| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase GenAI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. 
| | [POSSIBLE\_ROLES](./vertexai.md#possible_roles) | Possible roles. | +| [VertexAIError](./vertexai.md#vertexaierror) | Error class for the Vertex AI in Firebase SDK.For more information, refer to the documentation for the new [GenAIError](./vertexai.genaierror.md#genaierror_class). | +| [VertexAIModel](./vertexai.md#vertexaimodel) | Base class for Vertex AI in Firebase model APIs.For more information, refer to the documentation for the new [GenAIModel](./vertexai.genaimodel.md#genaimodel_class). | ## Type Aliases | Type Alias | Description | | --- | --- | +| [Backend](./vertexai.md#backend) | Union type representing the backend configuration for the GenAI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object.Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). | +| [BackendType](./vertexai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. | +| [GoogleAIBackend](./vertexai.md#googleaibackend) | Represents the configuration object for the Google AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the service with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). | | [Part](./vertexai.md#part) | Content part - includes text, image/video, or function call/response part types. | | [Role](./vertexai.md#role) | Role is the producer of the content. | | [Tool](./vertexai.md#tool) | Defines a tool that model can call to access external knowledge. | | [TypedSchema](./vertexai.md#typedschema) | A type that includes all specific Schema types. | +| [VertexAI](./vertexai.md#vertexai) | An instance of the Vertex AI in Firebase SDK.For more information, refer to the documentation for the new [GenAI](./vertexai.genai.md#genai_interface). | +| [VertexAIBackend](./vertexai.md#vertexaibackend) | Represents the configuration object for the Vertex AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the server with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. | ## function(app, ...) +### getGenAI(app, options) {:#getgenai_65c48ee} + +Returns the default [GenAI](./vertexai.genai.md#genai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. + +Signature: + +```typescript +export declare function getGenAI(app?: FirebaseApp, options?: GenAIOptions): GenAI; +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| app | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) to use. | +| options | [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) | [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) that configure the GenAI instance. | + +Returns: + +[GenAI](./vertexai.genai.md#genai_interface) + +The default [GenAI](./vertexai.genai.md#genai_interface) instance for the given [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). 
+ +### Example 1 + + +```javascript +const genAI = getGenAI(app); + +``` + +### Example 2 + + +```javascript +// Get a GenAI instance configured to use Google AI. +const genAI = getGenAI(app, { backend: googleAIBackend() }); + +``` + +### Example 3 + + +```javascript +// Get a GenAI instance configured to use Vertex AI. +const genAI = getGenAI(app, { backend: vertexAIBackend() }); + +``` + ### getVertexAI(app, options) {:#getvertexai_04094cf} -Returns a [VertexAI](./vertexai.vertexai.md#vertexai_interface) instance for the given app. +Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. Signature: @@ -156,25 +219,42 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions Returns: -[VertexAI](./vertexai.vertexai.md#vertexai_interface) +[VertexAI](./vertexai.md#vertexai) + +## function() + +### googleAIBackend() {:#googleaibackend} + +Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. + +Signature: + +```typescript +export declare function googleAIBackend(): GoogleAIBackend; +``` +Returns: + +[GoogleAIBackend](./vertexai.md#googleaibackend) + +A [GoogleAIBackend](./vertexai.md#googleaibackend) object. -## function(vertexAI, ...) +## function(genAI, ...) -### getGenerativeModel(vertexAI, modelParams, requestOptions) {:#getgenerativemodel_e3037c9} +### getGenerativeModel(genAI, modelParams, requestOptions) {:#getgenerativemodel_e3ccf80} Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. Signature: ```typescript -export declare function getGenerativeModel(vertexAI: VertexAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export declare function getGenerativeModel(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| vertexAI | [VertexAI](./vertexai.vertexai.md#vertexai_interface) | | +| genAI | [GenAI](./vertexai.genai.md#genai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | @@ -182,7 +262,7 @@ export declare function getGenerativeModel(vertexAI: VertexAI, modelParams: Mode [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) -### getImagenModel(vertexAI, modelParams, requestOptions) {:#getimagenmodel_812c375} +### getImagenModel(genAI, modelParams, requestOptions) {:#getimagenmodel_bffbd6b} > This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. > @@ -194,14 +274,14 @@ Only Imagen 3 models (named `imagen-3.0-*`) are supported. Signature: ```typescript -export declare function getImagenModel(vertexAI: VertexAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; +export declare function getImagenModel(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| vertexAI | [VertexAI](./vertexai.vertexai.md#vertexai_interface) | An instance of the Vertex AI in Firebase SDK. | +| genAI | [GenAI](./vertexai.genai.md#genai_interface) | A [GenAI](./vertexai.genai.md#genai_interface) instance. 
| | modelParams | [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface) | Parameters to use when making Imagen requests. | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Additional options to use when making requests. | @@ -213,6 +293,45 @@ export declare function getImagenModel(vertexAI: VertexAI, modelParams: ImagenMo If the `apiKey` or `projectId` fields are missing in your Firebase config. +## function(location, ...) + +### vertexAIBackend(location) {:#vertexaibackend_d0a4534} + +Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. + +Signature: + +```typescript +export declare function vertexAIBackend(location?: string): VertexAIBackend; +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| location | string | The region identifier, defaulting to us-central1; see [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | + +Returns: + +[VertexAIBackend](./vertexai.md#vertexaibackend) + +A [VertexAIBackend](./vertexai.md#vertexaibackend) object. + +## BackendType + +An enum-like object containing constants that represent the supported backends for the Firebase GenAI SDK. + +These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. + +Signature: + +```typescript +BackendType: { + readonly VERTEX_AI: "VERTEX_AI"; + readonly GOOGLE_AI: "GOOGLE_AI"; +} +``` + ## POSSIBLE\_ROLES Possible roles. @@ -223,6 +342,64 @@ Possible roles. POSSIBLE_ROLES: readonly ["user", "model", "function", "system"] ``` +## VertexAIError + +Error class for the Vertex AI in Firebase SDK. + +For more information, refer to the documentation for the new [GenAIError](./vertexai.genaierror.md#genaierror_class). + +Signature: + +```typescript +VertexAIError: typeof GenAIError +``` + +## VertexAIModel + +Base class for Vertex AI in Firebase model APIs. + +For more information, refer to the documentation for the new [GenAIModel](./vertexai.genaimodel.md#genaimodel_class). + +Signature: + +```typescript +VertexAIModel: typeof GenAIModel +``` + +## Backend + +Union type representing the backend configuration for the GenAI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object. + +Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). + +Signature: + +```typescript +export type Backend = GoogleAIBackend | VertexAIBackend; +``` + +## BackendType + +Type alias representing valid backend types. It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + +Signature: + +```typescript +export type BackendType = (typeof BackendType)[keyof typeof BackendType]; +``` + +## GoogleAIBackend + +Represents the configuration object for the Google AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the service with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). 
+ +Signature: + +```typescript +export type GoogleAIBackend = { + backendType: typeof BackendType.GOOGLE_AI; +}; +``` + ## Part Content part - includes text, image/video, or function call/response part types. @@ -263,6 +440,31 @@ A type that includes all specific Schema types. export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanSchema | ObjectSchema | ArraySchema; ``` +## VertexAI + +An instance of the Vertex AI in Firebase SDK. + +For more information, refer to the documentation for the new [GenAI](./vertexai.genai.md#genai_interface). + +Signature: + +```typescript +export type VertexAI = GenAI; +``` + +## VertexAIBackend + +Represents the configuration object for the Vertex AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the server with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. + +Signature: + +```typescript +export type VertexAIBackend = { + backendType: typeof BackendType.VERTEX_AI; + location: string; +}; +``` + ## BlockReason Reason that a prompt was blocked. @@ -323,8 +525,37 @@ export declare enum FunctionCallingMode | AUTO | "AUTO" | Default model behavior; model decides to predict either a function call or a natural language response. | | NONE | "NONE" | Model will not predict any function call. Model behavior is same as when not passing any function declarations. | +## GenAIErrorCode + +Standardized error codes that [GenAIError](./vertexai.genaierror.md#genaierror_class) can have. + +Signature: + +```typescript +export declare const enum GenAIErrorCode +``` + +## Enumeration Members + +| Member | Value | Description | +| --- | --- | --- | +| API\_NOT\_ENABLED | "api-not-enabled" | An error due to the Firebase API not being enabled in the Console. | +| ERROR | "error" | A generic error occurred. | +| FETCH\_ERROR | "fetch-error" | An error occurred while performing a fetch. | +| INVALID\_CONTENT | "invalid-content" | An error associated with a Content object. | +| INVALID\_SCHEMA | "invalid-schema" | An error due to invalid Schema input. | +| NO\_API\_KEY | "no-api-key" | An error occurred due to a missing Firebase API key. | +| NO\_APP\_ID | "no-app-id" | An error occured due to a missing Firebase app ID. | +| NO\_MODEL | "no-model" | An error occurred due to a model name not being specified during initialization. | +| NO\_PROJECT\_ID | "no-project-id" | An error occurred due to a missing project ID. | +| PARSE\_FAILED | "parse-failed" | An error occurred while parsing. | +| REQUEST\_ERROR | "request-error" | An error occurred in a request. | +| RESPONSE\_ERROR | "response-error" | An error occurred in a response. | +| UNSUPPORTED | "unsupported" | An error occured due an attempt to use an unsupported feature. | + ## HarmBlockMethod +This property is not supported in Google AI. Signature: @@ -414,6 +645,7 @@ export declare enum HarmSeverity | HARM\_SEVERITY\_LOW | "HARM_SEVERITY_LOW" | Low level of harm severity. | | HARM\_SEVERITY\_MEDIUM | "HARM_SEVERITY_MEDIUM" | Medium level of harm severity. | | HARM\_SEVERITY\_NEGLIGIBLE | "HARM_SEVERITY_NEGLIGIBLE" | Negligible level of harm severity. | +| HARM\_SEVERITY\_UNSUPPORTED | "HARM_SEVERITY_UNSUPPORTED" | Harm severity is not supported. The GoogleAI backend does not support HarmSeverity, so this value is used as a fallback. | ## ImagenAspectRatio @@ -531,30 +763,3 @@ export declare enum SchemaType | OBJECT | "object" | Object type. 
| | STRING | "string" | String type. | -## VertexAIErrorCode - -Standardized error codes that [VertexAIError](./vertexai.vertexaierror.md#vertexaierror_class) can have. - -Signature: - -```typescript -export declare const enum VertexAIErrorCode -``` - -## Enumeration Members - -| Member | Value | Description | -| --- | --- | --- | -| API\_NOT\_ENABLED | "api-not-enabled" | An error due to the Firebase API not being enabled in the Console. | -| ERROR | "error" | A generic error occurred. | -| FETCH\_ERROR | "fetch-error" | An error occurred while performing a fetch. | -| INVALID\_CONTENT | "invalid-content" | An error associated with a Content object. | -| INVALID\_SCHEMA | "invalid-schema" | An error due to invalid Schema input. | -| NO\_API\_KEY | "no-api-key" | An error occurred due to a missing Firebase API key. | -| NO\_APP\_ID | "no-app-id" | An error occured due to a missing Firebase app ID. | -| NO\_MODEL | "no-model" | An error occurred due to a model name not being specified during initialization. | -| NO\_PROJECT\_ID | "no-project-id" | An error occurred due to a missing project ID. | -| PARSE\_FAILED | "parse-failed" | An error occurred while parsing. | -| REQUEST\_ERROR | "request-error" | An error occurred in a request. | -| RESPONSE\_ERROR | "response-error" | An error occurred in a response. | - diff --git a/docs-devsite/vertexai.modelparams.md b/docs-devsite/vertexai.modelparams.md index d3963d240eb..f25f37e4dd6 100644 --- a/docs-devsite/vertexai.modelparams.md +++ b/docs-devsite/vertexai.modelparams.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # ModelParams interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). Signature: diff --git a/docs-devsite/vertexai.requestoptions.md b/docs-devsite/vertexai.requestoptions.md index dcd0c552ecb..ffedaa69859 100644 --- a/docs-devsite/vertexai.requestoptions.md +++ b/docs-devsite/vertexai.requestoptions.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # RequestOptions interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3037c9). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). Signature: diff --git a/docs-devsite/vertexai.safetysetting.md b/docs-devsite/vertexai.safetysetting.md index 17fa1cff839..5a837e8aeff 100644 --- a/docs-devsite/vertexai.safetysetting.md +++ b/docs-devsite/vertexai.safetysetting.md @@ -23,7 +23,7 @@ export interface SafetySetting | Property | Type | Description | | --- | --- | --- | | [category](./vertexai.safetysetting.md#safetysettingcategory) | [HarmCategory](./vertexai.md#harmcategory) | | -| [method](./vertexai.safetysetting.md#safetysettingmethod) | [HarmBlockMethod](./vertexai.md#harmblockmethod) | | +| [method](./vertexai.safetysetting.md#safetysettingmethod) | [HarmBlockMethod](./vertexai.md#harmblockmethod) | This property is not supported in Google AI. If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, a [GenAIError](./vertexai.genaierror.md#genaierror_class) will be thrown. | | [threshold](./vertexai.safetysetting.md#safetysettingthreshold) | [HarmBlockThreshold](./vertexai.md#harmblockthreshold) | | ## SafetySetting.category @@ -36,6 +36,8 @@ category: HarmCategory; ## SafetySetting.method +This property is not supported in Google AI. 
If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, a [GenAIError](./vertexai.genaierror.md#genaierror_class) will be thrown. + Signature: ```typescript diff --git a/docs-devsite/vertexai.schemashared.md b/docs-devsite/vertexai.schemashared.md index 0764a53bdc0..90956b93606 100644 --- a/docs-devsite/vertexai.schemashared.md +++ b/docs-devsite/vertexai.schemashared.md @@ -25,7 +25,7 @@ export interface SchemaShared | [description](./vertexai.schemashared.md#schemashareddescription) | string | Optional. The description of the property. | | [enum](./vertexai.schemashared.md#schemasharedenum) | string\[\] | Optional. The enum of the property. | | [example](./vertexai.schemashared.md#schemasharedexample) | unknown | Optional. The example of the property. | -| [format](./vertexai.schemashared.md#schemasharedformat) | string | Optional. The format of the property. | +| [format](./vertexai.schemashared.md#schemasharedformat) | string | Optional. The format of the property. When using the Google AI backend, this must be either 'enum' or 'date-time', otherwise requests will fail. | | [items](./vertexai.schemashared.md#schemashareditems) | T | Optional. The items of the property. | | [nullable](./vertexai.schemashared.md#schemasharednullable) | boolean | Optional. Whether the property is nullable. | | [properties](./vertexai.schemashared.md#schemasharedproperties) | { \[k: string\]: T; } | Optional. Map of Schema objects. | @@ -62,7 +62,7 @@ example?: unknown; ## SchemaShared.format -Optional. The format of the property. +Optional. The format of the property. When using the Google AI backend, this must be either `'enum'` or `'date-time'`, otherwise requests will fail. Signature: diff --git a/docs-devsite/vertexai.vertexai.md b/docs-devsite/vertexai.vertexai.md deleted file mode 100644 index d30d0f7113e..00000000000 --- a/docs-devsite/vertexai.vertexai.md +++ /dev/null @@ -1,44 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# VertexAI interface -An instance of the Vertex AI in Firebase SDK. - -Signature: - -```typescript -export interface VertexAI -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [app](./vertexai.vertexai.md#vertexaiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [VertexAI](./vertexai.vertexai.md#vertexai_interface) instance is associated with. | -| [location](./vertexai.vertexai.md#vertexailocation) | string | | - -## VertexAI.app - -The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [VertexAI](./vertexai.vertexai.md#vertexai_interface) instance is associated with. - -Signature: - -```typescript -app: FirebaseApp; -``` - -## VertexAI.location - -Signature: - -```typescript -location: string; -``` diff --git a/docs-devsite/vertexai.vertexaimodel.md b/docs-devsite/vertexai.vertexaimodel.md deleted file mode 100644 index 5c3244fe1e5..00000000000 --- a/docs-devsite/vertexai.vertexaimodel.md +++ /dev/null @@ -1,66 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! 
-This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# VertexAIModel class -Base class for Vertex AI in Firebase model APIs. - -The constructor for this class is marked as internal. Third-party code should not call the constructor directly or create subclasses that extend the `VertexAIModel` class. - -Signature: - -```typescript -export declare abstract class VertexAIModel -``` - -## Properties - -| Property | Modifiers | Type | Description | -| --- | --- | --- | --- | -| [model](./vertexai.vertexaimodel.md#vertexaimodelmodel) | | string | The fully qualified model resource name to use for generating images (for example, publishers/google/models/imagen-3.0-generate-002). | - -## Methods - -| Method | Modifiers | Description | -| --- | --- | --- | -| [normalizeModelName(modelName)](./vertexai.vertexaimodel.md#vertexaimodelnormalizemodelname) | static | Normalizes the given model name to a fully qualified model resource name. | - -## VertexAIModel.model - -The fully qualified model resource name to use for generating images (for example, `publishers/google/models/imagen-3.0-generate-002`). - -Signature: - -```typescript -readonly model: string; -``` - -## VertexAIModel.normalizeModelName() - -Normalizes the given model name to a fully qualified model resource name. - -Signature: - -```typescript -static normalizeModelName(modelName: string): string; -``` - -#### Parameters - -| Parameter | Type | Description | -| --- | --- | --- | -| modelName | string | The model name to normalize. | - -Returns: - -string - -The fully qualified model resource name. - diff --git a/packages/firebase/package.json b/packages/firebase/package.json index 0a108875770..f47e3378ee2 100644 --- a/packages/firebase/package.json +++ b/packages/firebase/package.json @@ -227,6 +227,18 @@ }, "default": "./storage/dist/esm/index.esm.js" }, + "./genai": { + "types": "./vertexai/dist/vertexai/index.d.ts", + "node": { + "require": "./vertexai/dist/index.cjs.js", + "import": "./vertexai/dist/index.mjs" + }, + "browser": { + "require": "./vertexai/dist/index.cjs.js", + "import": "./vertexai/dist/esm/index.esm.js" + }, + "default": "./vertexai/dist/esm/index.esm.js" + }, "./vertexai": { "types": "./vertexai/dist/vertexai/index.d.ts", "node": { diff --git a/packages/vertexai/.eslintrc.js b/packages/vertexai/.eslintrc.js index 1e8712b0633..dbc1d5aa33c 100644 --- a/packages/vertexai/.eslintrc.js +++ b/packages/vertexai/.eslintrc.js @@ -30,6 +30,7 @@ module.exports = { { 'packageDir': [path.resolve(__dirname, '../../'), __dirname] } - ] + ], + '@typescript-eslint/consistent-type-definitions': 0 } }; diff --git a/packages/vertexai/src/api.test.ts b/packages/vertexai/src/api.test.ts index 4a0b978d858..dc04b918dd9 100644 --- a/packages/vertexai/src/api.test.ts +++ b/packages/vertexai/src/api.test.ts @@ -14,14 +14,21 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -import { ImagenModelParams, ModelParams, VertexAIErrorCode } from './types'; -import { VertexAIError } from './errors'; -import { ImagenModel, getGenerativeModel, getImagenModel } from './api'; +import { ImagenModelParams, ModelParams, GenAIErrorCode } from './types'; +import { GenAIError } from './errors'; +import { + ImagenModel, + getGenerativeModel, + getImagenModel, + googleAIBackend, + vertexAIBackend +} from './api'; import { expect } from 'chai'; -import { VertexAI } from './public-types'; +import { BackendType, GenAI } from './public-types'; import { GenerativeModel } from './models/generative-model'; +import { DEFAULT_LOCATION } from './constants'; -const fakeVertexAI: VertexAI = { +const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -31,140 +38,161 @@ const fakeVertexAI: VertexAI = { appId: 'my-appid' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; describe('Top level API', () => { it('getGenerativeModel throws if no model is provided', () => { try { - getGenerativeModel(fakeVertexAI, {} as ModelParams); + getGenerativeModel(fakeGenAI, {} as ModelParams); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_MODEL); - expect((e as VertexAIError).message).includes( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_MODEL); + expect((e as GenAIError).message).includes( `VertexAI: Must provide a model name. Example: ` + - `getGenerativeModel({ model: 'my-model-name' }) (vertexAI/${VertexAIErrorCode.NO_MODEL})` + `getGenerativeModel({ model: 'my-model-name' }) (vertexAI/${GenAIErrorCode.NO_MODEL})` ); } }); it('getGenerativeModel throws if no apiKey is provided', () => { const fakeVertexNoApiKey = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { projectId: 'my-project', appId: 'my-appid' } } - } as VertexAI; + } as GenAI; try { getGenerativeModel(fakeVertexNoApiKey, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_API_KEY); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_API_KEY); + expect((e as GenAIError).message).equals( `VertexAI: The "apiKey" field is empty in the local ` + `Firebase config. Firebase VertexAI requires this field to` + - ` contain a valid API key. (vertexAI/${VertexAIErrorCode.NO_API_KEY})` + ` contain a valid API key. (vertexAI/${GenAIErrorCode.NO_API_KEY})` ); } }); it('getGenerativeModel throws if no projectId is provided', () => { const fakeVertexNoProject = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { apiKey: 'my-key', appId: 'my-appid' } } - } as VertexAI; + } as GenAI; try { getGenerativeModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes( - VertexAIErrorCode.NO_PROJECT_ID - ); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_PROJECT_ID); + expect((e as GenAIError).message).equals( `VertexAI: The "projectId" field is empty in the local` + ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid project ID. (vertexAI/${VertexAIErrorCode.NO_PROJECT_ID})` + `to contain a valid project ID. 
(vertexAI/${GenAIErrorCode.NO_PROJECT_ID})` ); } }); it('getGenerativeModel throws if no appId is provided', () => { const fakeVertexNoProject = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { apiKey: 'my-key', projectId: 'my-projectid' } } - } as VertexAI; + } as GenAI; try { getGenerativeModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_APP_ID); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_APP_ID); + expect((e as GenAIError).message).equals( `VertexAI: The "appId" field is empty in the local` + ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid app ID. (vertexAI/${VertexAIErrorCode.NO_APP_ID})` + `to contain a valid app ID. (vertexAI/${GenAIErrorCode.NO_APP_ID})` ); } }); it('getGenerativeModel gets a GenerativeModel', () => { - const genModel = getGenerativeModel(fakeVertexAI, { model: 'my-model' }); + const genModel = getGenerativeModel(fakeGenAI, { model: 'my-model' }); expect(genModel).to.be.an.instanceOf(GenerativeModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); it('getImagenModel throws if no model is provided', () => { try { - getImagenModel(fakeVertexAI, {} as ImagenModelParams); + getImagenModel(fakeGenAI, {} as ImagenModelParams); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_MODEL); - expect((e as VertexAIError).message).includes( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_MODEL); + expect((e as GenAIError).message).includes( `VertexAI: Must provide a model name. Example: ` + - `getImagenModel({ model: 'my-model-name' }) (vertexAI/${VertexAIErrorCode.NO_MODEL})` + `getImagenModel({ model: 'my-model-name' }) (vertexAI/${GenAIErrorCode.NO_MODEL})` ); } }); it('getImagenModel throws if no apiKey is provided', () => { const fakeVertexNoApiKey = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { projectId: 'my-project', appId: 'my-appid' } } - } as VertexAI; + } as GenAI; try { getImagenModel(fakeVertexNoApiKey, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_API_KEY); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_API_KEY); + expect((e as GenAIError).message).equals( `VertexAI: The "apiKey" field is empty in the local ` + `Firebase config. Firebase VertexAI requires this field to` + - ` contain a valid API key. (vertexAI/${VertexAIErrorCode.NO_API_KEY})` + ` contain a valid API key. (vertexAI/${GenAIErrorCode.NO_API_KEY})` ); } }); it('getImagenModel throws if no projectId is provided', () => { const fakeVertexNoProject = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { apiKey: 'my-key', appId: 'my-appid' } } - } as VertexAI; + } as GenAI; try { getImagenModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes( - VertexAIErrorCode.NO_PROJECT_ID - ); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_PROJECT_ID); + expect((e as GenAIError).message).equals( `VertexAI: The "projectId" field is empty in the local` + ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid project ID. (vertexAI/${VertexAIErrorCode.NO_PROJECT_ID})` + `to contain a valid project ID. 
(vertexAI/${GenAIErrorCode.NO_PROJECT_ID})` ); } }); it('getImagenModel throws if no appId is provided', () => { const fakeVertexNoProject = { - ...fakeVertexAI, + ...fakeGenAI, app: { options: { apiKey: 'my-key', projectId: 'my-project' } } - } as VertexAI; + } as GenAI; try { getImagenModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as VertexAIError).code).includes(VertexAIErrorCode.NO_APP_ID); - expect((e as VertexAIError).message).equals( + expect((e as GenAIError).code).includes(GenAIErrorCode.NO_APP_ID); + expect((e as GenAIError).message).equals( `VertexAI: The "appId" field is empty in the local` + ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid app ID. (vertexAI/${VertexAIErrorCode.NO_APP_ID})` + `to contain a valid app ID. (vertexAI/${GenAIErrorCode.NO_APP_ID})` ); } }); it('getImagenModel gets an ImagenModel', () => { - const genModel = getImagenModel(fakeVertexAI, { model: 'my-model' }); + const genModel = getImagenModel(fakeGenAI, { model: 'my-model' }); expect(genModel).to.be.an.instanceOf(ImagenModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); + it('googleAIBackend returns a backend with backendType GOOGLE_AI', () => { + const backend = googleAIBackend(); + expect(backend.backendType).to.equal(BackendType.GOOGLE_AI); + }); + it('vertexAIBackend returns a backend with backendType VERTEX_AI', () => { + const backend = vertexAIBackend(); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(DEFAULT_LOCATION); + }); + it('vertexAIBackend sets custom location', () => { + const backend = vertexAIBackend('test-location'); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal('test-location'); + }); + it('vertexAIBackend sets custom location even if empty string', () => { + const backend = vertexAIBackend(''); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(''); + }); + it('vertexAIBackend uses default location if location is null', () => { + const backend = vertexAIBackend(null as any); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(DEFAULT_LOCATION); + }); }); diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index 7843a5bdeee..11f9dbcf56c 100644 --- a/packages/vertexai/src/api.ts +++ b/packages/vertexai/src/api.ts @@ -18,27 +18,55 @@ import { FirebaseApp, getApp, _getProvider } from '@firebase/app'; import { Provider } from '@firebase/component'; import { getModularInstance } from '@firebase/util'; -import { DEFAULT_LOCATION, VERTEX_TYPE } from './constants'; -import { VertexAIService } from './service'; -import { VertexAI, VertexAIOptions } from './public-types'; +import { DEFAULT_LOCATION, GENAI_TYPE } from './constants'; +import { GenAIService } from './service'; +import { + BackendType, + GenAI, + GenAIOptions, + GoogleAIBackend, + VertexAI, + VertexAIBackend, + VertexAIOptions +} from './public-types'; import { ImagenModelParams, ModelParams, RequestOptions, - VertexAIErrorCode + GenAIErrorCode } from './types'; -import { VertexAIError } from './errors'; -import { VertexAIModel, GenerativeModel, ImagenModel } from './models'; +import { GenAIError } from './errors'; +import { GenAIModel, GenerativeModel, ImagenModel } from './models'; +import { encodeInstanceIdentifier } from './helpers'; export { ChatSession } from './methods/chat-session'; export * from 
'./requests/schema-builder'; export { ImagenImageFormat } from './requests/imagen-image-format'; -export { VertexAIModel, GenerativeModel, ImagenModel }; -export { VertexAIError }; +export { GenAIModel, GenerativeModel, ImagenModel, GenAIError }; + +export { GenAIErrorCode as VertexAIErrorCode }; + +/** + * Base class for Vertex AI in Firebase model APIs. + * + * For more information, refer to the documentation for the new {@link GenAIModel}. + * + * @public + */ +export const VertexAIModel = GenAIModel; + +/** + * Error class for the Vertex AI in Firebase SDK. + * + * For more information, refer to the documentation for the new {@link GenAIError}. + * + * @public + */ +export const VertexAIError = GenAIError; declare module '@firebase/component' { interface NameServiceMapping { - [VERTEX_TYPE]: VertexAIService; + [GENAI_TYPE]: GenAIService; } } @@ -55,13 +83,93 @@ export function getVertexAI( ): VertexAI { app = getModularInstance(app); // Dependencies - const vertexProvider: Provider<'vertexAI'> = _getProvider(app, VERTEX_TYPE); + const genAIProvider: Provider<'genAI'> = _getProvider(app, GENAI_TYPE); - return vertexProvider.getImmediate({ - identifier: options?.location || DEFAULT_LOCATION + const identifier = encodeInstanceIdentifier({ + backendType: BackendType.VERTEX_AI, + location: options?.location ?? DEFAULT_LOCATION + }); + return genAIProvider.getImmediate({ + identifier }); } +/** + * Returns the default {@link GenAI} instance that is associated with the provided + * {@link @firebase/app#FirebaseApp}. If no instance exists, initializes a new instance with the + * default settings. + * + * @example + * ```javascript + * const genAI = getGenAI(app); + * ``` + * + * @example + * ```javascript + * // Get a GenAI instance configured to use Google AI. + * const genAI = getGenAI(app, { backend: googleAIBackend() }); + * ``` + * + * @example + * ```javascript + * // Get a GenAI instance configured to use Vertex AI. + * const genAI = getGenAI(app, { backend: vertexAIBackend() }); + * ``` + * + * @param app - The {@link @firebase/app#FirebaseApp} to use. + * @param options - {@link GenAIOptions} that configure the GenAI instance. + * @returns The default {@link GenAI} instance for the given {@link @firebase/app#FirebaseApp}. + * + * @public + */ +export function getGenAI( + app: FirebaseApp = getApp(), + options: GenAIOptions = { backend: googleAIBackend() } +): GenAI { + app = getModularInstance(app); + // Dependencies + const genAIProvider: Provider<'genAI'> = _getProvider(app, GENAI_TYPE); + + const identifier = encodeInstanceIdentifier(options.backend); + return genAIProvider.getImmediate({ + identifier + }); +} + +/** + * Creates a {@link Backend} instance configured to use Google AI. + * + * @returns A {@link GoogleAIBackend} object. + * + * @public + */ +export function googleAIBackend(): GoogleAIBackend { + const backend: GoogleAIBackend = { + backendType: BackendType.GOOGLE_AI + }; + + return backend; +} + +/** + * Creates a {@link Backend} instance configured to use Vertex AI. + * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} + * for a list of supported locations. + * @returns A {@link VertexAIBackend} object. + * + * @public + */ +export function vertexAIBackend(location?: string): VertexAIBackend { + const backend: VertexAIBackend = { + backendType: BackendType.VERTEX_AI, + location: location ?? 
DEFAULT_LOCATION + }; + + return backend; +} + /** * Returns a {@link GenerativeModel} class with methods for inference * and other functionality. @@ -69,17 +177,17 @@ export function getVertexAI( * @public */ export function getGenerativeModel( - vertexAI: VertexAI, + genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions ): GenerativeModel { if (!modelParams.model) { - throw new VertexAIError( - VertexAIErrorCode.NO_MODEL, + throw new GenAIError( + GenAIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })` ); } - return new GenerativeModel(vertexAI, modelParams, requestOptions); + return new GenerativeModel(genAI, modelParams, requestOptions); } /** @@ -87,7 +195,7 @@ export function getGenerativeModel( * * Only Imagen 3 models (named `imagen-3.0-*`) are supported. * - * @param vertexAI - An instance of the Vertex AI in Firebase SDK. + * @param genAI - A {@link GenAI} instance. * @param modelParams - Parameters to use when making Imagen requests. * @param requestOptions - Additional options to use when making requests. * @@ -97,15 +205,15 @@ export function getGenerativeModel( * @beta */ export function getImagenModel( - vertexAI: VertexAI, + genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions ): ImagenModel { if (!modelParams.model) { - throw new VertexAIError( - VertexAIErrorCode.NO_MODEL, + throw new GenAIError( + GenAIErrorCode.NO_MODEL, `Must provide a model name. Example: getImagenModel({ model: 'my-model-name' })` ); } - return new ImagenModel(vertexAI, modelParams, requestOptions); + return new ImagenModel(genAI, modelParams, requestOptions); } diff --git a/packages/vertexai/src/backwards-compatbility.test.ts b/packages/vertexai/src/backwards-compatbility.test.ts new file mode 100644 index 00000000000..23d0511445a --- /dev/null +++ b/packages/vertexai/src/backwards-compatbility.test.ts @@ -0,0 +1,85 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { expect } from 'chai'; +import { + GenAIError, + GenAIModel, + GenerativeModel, + VertexAIError, + VertexAIErrorCode, + VertexAIModel, + getGenerativeModel, + getImagenModel, + vertexAIBackend +} from './api'; +import { GenAI, VertexAI, GenAIErrorCode } from './public-types'; + +function assertAssignable(): void {} + +const fakeGenAI: GenAI = { + app: { + name: 'DEFAULT', + automaticDataCollectionEnabled: true, + options: { + apiKey: 'key', + projectId: 'my-project', + appId: 'app-id' + } + }, + backend: vertexAIBackend('us-central1'), + location: 'us-central1' +}; + +const fakeVertexAI: VertexAI = fakeGenAI; + +describe('backwards-compatible types', () => { + it('GenAI is backwards compatible with VertexAI', () => { + assertAssignable(); + }); + it('GenAIError is backwards compatible with VertexAIError', () => { + assertAssignable(); + const err = new VertexAIError(VertexAIErrorCode.ERROR, ''); + expect(err).instanceOf(GenAIError); + expect(err).instanceOf(VertexAIError); + }); + it('GenAIErrorCode is backwards compatible with VertexAIErrorCode', () => { + assertAssignable(); + const errCode = GenAIErrorCode.ERROR; + expect(errCode).to.equal(VertexAIErrorCode.ERROR); + }); + it('GenAIModel is backwards compatible with VertexAIModel', () => { + assertAssignable(); + + const model = new GenerativeModel(fakeGenAI, { model: 'model-name' }); + expect(model).to.be.instanceOf(GenAIModel); + expect(model).to.be.instanceOf(VertexAIModel); + }); +}); + +describe('backward-compatible functions', () => { + it('getGenerativeModel', () => { + const model = getGenerativeModel(fakeVertexAI, { model: 'model-name' }); + expect(model).to.be.instanceOf(GenAIModel); + expect(model).to.be.instanceOf(VertexAIModel); + }); + it('getImagenModel', () => { + const model = getImagenModel(fakeVertexAI, { model: 'model-name' }); + expect(model).to.be.instanceOf(GenAIModel); + expect(model).to.be.instanceOf(VertexAIModel); + }); +}); diff --git a/packages/vertexai/src/constants.ts b/packages/vertexai/src/constants.ts index 357e6c4e77c..4034e410003 100644 --- a/packages/vertexai/src/constants.ts +++ b/packages/vertexai/src/constants.ts @@ -16,9 +16,18 @@ */ import { version } from '../package.json'; +import { BackendType } from './public-types'; +import { InstanceIdentifier } from './types/internal'; +// TODO (v12): Remove this export const VERTEX_TYPE = 'vertexAI'; +export const GENAI_TYPE = 'genAI'; + +export const DEFAULT_INSTANCE_IDENTIFER: InstanceIdentifier = { + backendType: BackendType.GOOGLE_AI +}; + export const DEFAULT_LOCATION = 'us-central1'; export const DEFAULT_BASE_URL = 'https://firebasevertexai.googleapis.com'; diff --git a/packages/vertexai/src/errors.ts b/packages/vertexai/src/errors.ts index ad3f9b72f5a..c836e49d51e 100644 --- a/packages/vertexai/src/errors.ts +++ b/packages/vertexai/src/errors.ts @@ -16,7 +16,7 @@ */ import { FirebaseError } from '@firebase/util'; -import { VertexAIErrorCode, CustomErrorData } from './types'; +import { GenAIErrorCode as GenAIErrorCode, CustomErrorData } from './types'; import { VERTEX_TYPE } from './constants'; /** @@ -24,22 +24,22 @@ import { VERTEX_TYPE } from './constants'; * * @public */ -export class VertexAIError extends FirebaseError { +export class GenAIError extends FirebaseError { /** - * Constructs a new instance of the `VertexAIError` class. + * Constructs a new instance of the `GenAIError` class. * - * @param code - The error code from {@link VertexAIErrorCode}. + * @param code - The error code from {@link GenAIErrorCode}. 
* @param message - A human-readable message describing the error. * @param customErrorData - Optional error data. */ constructor( - readonly code: VertexAIErrorCode, + readonly code: GenAIErrorCode, message: string, readonly customErrorData?: CustomErrorData ) { // Match error format used by FirebaseError from ErrorFactory - const service = VERTEX_TYPE; - const serviceName = 'VertexAI'; + const service = VERTEX_TYPE; // TODO (v12): Rename to GENAI_TYPE + const serviceName = 'VertexAI'; // TODO (v12): Rename to GenAI on breaking release. const fullCode = `${service}/${code}`; const fullMessage = `${serviceName}: ${message} (${fullCode})`; super(code, fullMessage); @@ -51,14 +51,14 @@ export class VertexAIError extends FirebaseError { if (Error.captureStackTrace) { // Allows us to initialize the stack trace without including the constructor itself at the // top level of the stack trace. - Error.captureStackTrace(this, VertexAIError); + Error.captureStackTrace(this, GenAIError); } - // Allows instanceof VertexAIError in ES5/ES6 + // Allows instanceof GenAIError in ES5/ES6 // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget // which we can now use since we no longer target ES5. - Object.setPrototypeOf(this, VertexAIError.prototype); + Object.setPrototypeOf(this, GenAIError.prototype); // Since Error is an interface, we don't inherit toString and so we define it ourselves. this.toString = () => fullMessage; diff --git a/packages/vertexai/src/googleAIMappers.test.ts b/packages/vertexai/src/googleAIMappers.test.ts new file mode 100644 index 00000000000..3952cce2544 --- /dev/null +++ b/packages/vertexai/src/googleAIMappers.test.ts @@ -0,0 +1,400 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { expect, use } from 'chai'; +import sinon, { restore, stub } from 'sinon'; +import sinonChai from 'sinon-chai'; +import { + mapCountTokensRequest, + mapGenerateContentCandidates, + mapGenerateContentRequest, + mapGenerateContentResponse, + mapPromptFeedback +} from './googleAIMappers'; +import { + BlockReason, + Content, + CountTokensRequest, + GenerateContentRequest, + HarmBlockMethod, + HarmBlockThreshold, + HarmCategory, + HarmProbability, + HarmSeverity, + SafetyRating, + GenAIErrorCode, + FinishReason, + PromptFeedback +} from './types'; +import { + GoogleAIGenerateContentResponse, + GoogleAIGenerateContentCandidate, + GoogleAICountTokensRequest +} from './types/googleAI'; +import { logger } from './logger'; +import { GenAIError } from './errors'; +import { getMockResponse } from '../test-utils/mock-response'; + +use(sinonChai); + +const fakeModel = 'models/gemini-pro'; + +const fakeContents: Content[] = [{ role: 'user', parts: [{ text: 'hello' }] }]; + +describe('Google AI Mappers', () => { + let loggerWarnStub: sinon.SinonStub; + + beforeEach(() => { + loggerWarnStub = stub(logger, 'warn'); + }); + + afterEach(() => { + restore(); + }); + + describe('mapGenerateContentRequest', () => { + it('should throw if safetySettings contain method', () => { + const request: GenerateContentRequest = { + contents: fakeContents, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + method: HarmBlockMethod.SEVERITY + } + ] + }; + expect(() => mapGenerateContentRequest(request)) + .to.throw(GenAIError, /SafetySetting.method is not supported/i) + .with.property('code', GenAIErrorCode.UNSUPPORTED); + }); + + it('should warn and round topK if present', () => { + const request: GenerateContentRequest = { + contents: fakeContents, + generationConfig: { + topK: 15.7 + } + }; + const mappedRequest = mapGenerateContentRequest(request); + expect(loggerWarnStub).to.have.been.calledOnceWith( + 'topK in GenerationConfig has been rounded to the nearest integer.' 
+ ); + expect(mappedRequest.generationConfig?.topK).to.equal(16); + }); + + it('should not modify topK if it is already an integer', () => { + const request: GenerateContentRequest = { + contents: fakeContents, + generationConfig: { + topK: 16 + } + }; + const mappedRequest = mapGenerateContentRequest(request); + expect(loggerWarnStub).to.not.have.been.called; + expect(mappedRequest.generationConfig?.topK).to.equal(16); + }); + + it('should return the request mostly unchanged if valid', () => { + const request: GenerateContentRequest = { + contents: fakeContents, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE + } + ], + generationConfig: { + temperature: 0.5 + } + }; + const mappedRequest = mapGenerateContentRequest({ ...request }); + expect(mappedRequest).to.deep.equal(request); + expect(loggerWarnStub).to.not.have.been.called; + }); + }); + + describe('mapGenerateContentResponse', () => { + it('should map a full Google AI response', async () => { + const googleAIMockResponse: GoogleAIGenerateContentResponse = await ( + getMockResponse('googleAI', 'unary-success-citations.txt') as Response + ).json(); + console.log(JSON.stringify(googleAIMockResponse)); + const mappedResponse = mapGenerateContentResponse(googleAIMockResponse); + + console.log(JSON.stringify(googleAIMockResponse)); + console.log(JSON.stringify(mappedResponse)); + + expect(mappedResponse.candidates).to.exist; + expect(mappedResponse.candidates?.[0].content.parts[0].text).to.contain( + 'quantum mechanics' + ); + + // Mapped citations + expect( + mappedResponse.candidates?.[0].citationMetadata?.citations[0].startIndex + ).to.equal( + googleAIMockResponse.candidates?.[0].citationMetadata + ?.citationSources[0].startIndex + ); + expect( + mappedResponse.candidates?.[0].citationMetadata?.citations[0].endIndex + ).to.equal( + googleAIMockResponse.candidates?.[0].citationMetadata + ?.citationSources[0].endIndex + ); + + // Mapped safety ratings + expect( + mappedResponse.candidates?.[0].safetyRatings?.[0].probabilityScore + ).to.equal(0); + expect( + mappedResponse.candidates?.[0].safetyRatings?.[0].severityScore + ).to.equal(0); + expect( + mappedResponse.candidates?.[0].safetyRatings?.[0].severity + ).to.equal(HarmSeverity.HARM_SEVERITY_UNSUPPORTED); + + expect(mappedResponse.candidates?.[0].finishReason).to.equal( + FinishReason.STOP + ); + + // Check usage metadata passthrough + expect(mappedResponse.usageMetadata).to.deep.equal( + googleAIMockResponse.usageMetadata + ); + }); + + it('should handle missing candidates and promptFeedback', () => { + const googleAIResponse: GoogleAIGenerateContentResponse = { + // No candidates + // No promptFeedback + usageMetadata: { + promptTokenCount: 5, + candidatesTokenCount: 0, + totalTokenCount: 5 + } + }; + const mappedResponse = mapGenerateContentResponse(googleAIResponse); + expect(mappedResponse.candidates).to.be.undefined; + expect(mappedResponse.promptFeedback).to.be.undefined; // Mapped to undefined + expect(mappedResponse.usageMetadata).to.deep.equal( + googleAIResponse.usageMetadata + ); + }); + + it('should handle empty candidates array', () => { + const googleAIResponse: GoogleAIGenerateContentResponse = { + candidates: [], + usageMetadata: { + promptTokenCount: 5, + candidatesTokenCount: 0, + totalTokenCount: 5 + } + }; + const mappedResponse = mapGenerateContentResponse(googleAIResponse); + expect(mappedResponse.candidates).to.deep.equal([]); + 
expect(mappedResponse.promptFeedback).to.be.undefined; + expect(mappedResponse.usageMetadata).to.deep.equal( + googleAIResponse.usageMetadata + ); + }); + }); + + describe('mapCountTokensRequest', () => { + it('should map a Vertex AI CountTokensRequest to Google AI format', () => { + const vertexRequest: CountTokensRequest = { + contents: fakeContents, + systemInstruction: { role: 'system', parts: [{ text: 'Be nice' }] }, + tools: [ + { functionDeclarations: [{ name: 'foo', description: 'bar' }] } + ], + generationConfig: { temperature: 0.8 } + }; + + const expectedGoogleAIRequest: GoogleAICountTokensRequest = { + generateContentRequest: { + model: fakeModel, + contents: vertexRequest.contents, + systemInstruction: vertexRequest.systemInstruction, + tools: vertexRequest.tools, + generationConfig: vertexRequest.generationConfig + } + }; + + const mappedRequest = mapCountTokensRequest(vertexRequest, fakeModel); + expect(mappedRequest).to.deep.equal(expectedGoogleAIRequest); + }); + + it('should map a minimal Vertex AI CountTokensRequest', () => { + const vertexRequest: CountTokensRequest = { + contents: fakeContents + }; + + const expectedGoogleAIRequest: GoogleAICountTokensRequest = { + generateContentRequest: { + model: fakeModel, + contents: vertexRequest.contents, + systemInstruction: undefined, + tools: undefined, + generationConfig: undefined + } + }; + + const mappedRequest = mapCountTokensRequest(vertexRequest, fakeModel); + expect(mappedRequest).to.deep.equal(expectedGoogleAIRequest); + }); + }); + + describe('mapGenerateContentCandidates', () => { + it('should map citationSources to citationMetadata.citations', () => { + const candidates: GoogleAIGenerateContentCandidate[] = [ + { + index: 0, + content: { role: 'model', parts: [{ text: 'Cited text' }] }, + citationMetadata: { + citationSources: [ + { startIndex: 0, endIndex: 5, uri: 'uri1', license: 'MIT' }, + { startIndex: 6, endIndex: 10, uri: 'uri2' } + ] + } + } + ]; + const mapped = mapGenerateContentCandidates(candidates); + expect(mapped[0].citationMetadata).to.exist; + expect(mapped[0].citationMetadata?.citations).to.deep.equal( + candidates[0].citationMetadata?.citationSources + ); + expect(mapped[0].citationMetadata?.citations[0].title).to.be.undefined; // Not in Google AI + expect(mapped[0].citationMetadata?.citations[0].publicationDate).to.be + .undefined; // Not in Google AI + }); + + it('should add default safety rating properties and warn', () => { + const candidates: GoogleAIGenerateContentCandidate[] = [ + { + index: 0, + content: { role: 'model', parts: [{ text: 'Maybe unsafe' }] }, + safetyRatings: [ + { + category: HarmCategory.HARM_CATEGORY_HARASSMENT, + probability: HarmProbability.MEDIUM, + blocked: false + // Missing severity, probabilityScore, severityScore + } as any + ] + } + ]; + const mapped = mapGenerateContentCandidates(candidates); + expect(loggerWarnStub).to.have.been.calledOnceWith( + "Candidate safety rating properties 'severity', 'severityScore', and 'probabilityScore' are not included in responses from Google AI. Properties have been assigned to default values." 
+ ); + expect(mapped[0].safetyRatings).to.exist; + const safetyRating = mapped[0].safetyRatings?.[0] as SafetyRating; // Type assertion + expect(safetyRating.severity).to.equal( + HarmSeverity.HARM_SEVERITY_UNSUPPORTED + ); + expect(safetyRating.probabilityScore).to.equal(0); + expect(safetyRating.severityScore).to.equal(0); + // Existing properties should be preserved + expect(safetyRating.category).to.equal( + HarmCategory.HARM_CATEGORY_HARASSMENT + ); + expect(safetyRating.probability).to.equal(HarmProbability.MEDIUM); + expect(safetyRating.blocked).to.be.false; + }); + + it('should throw if videoMetadata is present in parts', () => { + const candidates: GoogleAIGenerateContentCandidate[] = [ + { + index: 0, + content: { + role: 'model', + parts: [ + { + inlineData: { mimeType: 'video/mp4', data: 'base64==' }, + videoMetadata: { startOffset: '0s', endOffset: '5s' } // Unsupported + } + ] + } + } + ]; + expect(() => mapGenerateContentCandidates(candidates)) + .to.throw(GenAIError, /Part.videoMetadata is not supported/i) + .with.property('code', GenAIErrorCode.UNSUPPORTED); + }); + + it('should handle candidates without citation or safety ratings', () => { + const candidates: GoogleAIGenerateContentCandidate[] = [ + { + index: 0, + content: { role: 'model', parts: [{ text: 'Simple text' }] }, + finishReason: FinishReason.STOP + } + ]; + const mapped = mapGenerateContentCandidates(candidates); + expect(mapped[0].citationMetadata).to.be.undefined; + expect(mapped[0].safetyRatings).to.be.undefined; + expect(mapped[0].content.parts[0].text).to.equal('Simple text'); + expect(loggerWarnStub).to.not.have.been.called; + }); + + it('should handle empty candidate array', () => { + const candidates: GoogleAIGenerateContentCandidate[] = []; + const mapped = mapGenerateContentCandidates(candidates); + expect(mapped).to.deep.equal([]); + expect(loggerWarnStub).to.not.have.been.called; + }); + }); + + describe('mapPromptFeedback', () => { + it('should add default safety rating properties and warn', () => { + const feedback: PromptFeedback = { + blockReason: BlockReason.OTHER, + safetyRatings: [ + { + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + probability: HarmProbability.HIGH, + blocked: true + // Missing severity, probabilityScore, severityScore + } as any + ] + // Missing blockReasonMessage + }; + const mapped = mapPromptFeedback(feedback); + expect(loggerWarnStub).to.have.been.calledOnceWith( + "PromptFeedback safety ratings' properties severity, severityScore, and probabilityScore are not included in responses from Google AI. Properties have been assigned to default values." 
+ ); + expect(mapped.safetyRatings).to.exist; + const safetyRating = mapped.safetyRatings[0] as SafetyRating; // Type assertion + expect(safetyRating.severity).to.equal( + HarmSeverity.HARM_SEVERITY_UNSUPPORTED + ); + expect(safetyRating.probabilityScore).to.equal(0); + expect(safetyRating.severityScore).to.equal(0); + // Existing properties should be preserved + expect(safetyRating.category).to.equal( + HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT + ); + expect(safetyRating.probability).to.equal(HarmProbability.HIGH); + expect(safetyRating.blocked).to.be.true; + // Other properties + expect(mapped.blockReason).to.equal(BlockReason.OTHER); + expect(mapped.blockReasonMessage).to.be.undefined; // Not present in input + }); + }); +}); diff --git a/packages/vertexai/src/googleAIMappers.ts b/packages/vertexai/src/googleAIMappers.ts new file mode 100644 index 00000000000..e61a3532be1 --- /dev/null +++ b/packages/vertexai/src/googleAIMappers.ts @@ -0,0 +1,235 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { GenAIError } from './errors'; +import { logger } from './logger'; +import { + CitationMetadata, + CountTokensRequest, + GenerateContentCandidate, + GenerateContentRequest, + GenerateContentResponse, + HarmSeverity, + InlineDataPart, + PromptFeedback, + SafetyRating, + GenAIErrorCode +} from './types'; +import { + GoogleAIGenerateContentResponse, + GoogleAIGenerateContentCandidate, + GoogleAICountTokensRequest +} from './types/googleAI'; + +/** + * This SDK supports both Vertex AI and Google AI APIs. + * The public API prioritizes the Vertex AI API. + * We avoid having two sets of types by translating requests and responses between the two API formats. + * We want to avoid two sets of types so that developers can switch between Vertex AI and Google AI + * with minimal changes to their code. + * + * In here are functions that map requests and responses between the two API formats. + * VertexAI requests defined by the user are mapped to Google AI requests before they're sent. + * Google AI responses are mapped to VertexAI responses so they can be returned to the user. + */ + +/** + * Maps a Vertex AI {@link GenerateContentRequest} to a format that can be sent to Google AI. + * + * @param generateContentRequest The {@link GenerateContentRequest} to map. + * @returns A {@link GenerateContentResponse} that conforms to the Google AI format. + * + * @throws If the request contains properties that are unsupported by Google AI. + * + * @internal + */ +export function mapGenerateContentRequest( + generateContentRequest: GenerateContentRequest +): GenerateContentRequest { + generateContentRequest.safetySettings?.forEach(safetySetting => { + if (safetySetting.method) { + throw new GenAIError( + GenAIErrorCode.UNSUPPORTED, + 'SafetySetting.method is not supported in the Google AI. Please remove this property.' 
+ ); + } + }); + + if (generateContentRequest.generationConfig?.topK) { + const roundedTopK = Math.round( + generateContentRequest.generationConfig.topK + ); + + if (roundedTopK !== generateContentRequest.generationConfig.topK) { + logger.warn( + 'topK in GenerationConfig has been rounded to the nearest integer.' + ); + generateContentRequest.generationConfig.topK = roundedTopK; + } + } + + return generateContentRequest; +} + +/** + * Maps a {@link GenerateContentResponse} from Google AI to the format of the + * {@link GenerateContentResponse} that we get from VertexAI that is exposed in the public API. + * + * @param googleAIResponse The {@link GenerateContentResponse} from Google AI. + * @returns A {@link GenerateContentResponse} that conforms to the public API's format. + * + * @internal + */ +export function mapGenerateContentResponse( + googleAIResponse: GoogleAIGenerateContentResponse +): GenerateContentResponse { + const generateContentResponse = { + candidates: googleAIResponse.candidates + ? mapGenerateContentCandidates(googleAIResponse.candidates) + : undefined, + prompt: googleAIResponse.promptFeedback + ? mapPromptFeedback(googleAIResponse.promptFeedback) + : undefined, + usageMetadata: googleAIResponse.usageMetadata + }; + + return generateContentResponse; +} + +/** + * Maps a Vertex AI {@link CountTokensRequest} to a format that can be sent to Google AI. + * + * @param countTokensRequest The {@link CountTokensRequest} to map. + * @param model The model to count tokens with. + * @returns A {@link CountTokensRequest} that conforms to the Google AI format. + * + * @internal + */ +export function mapCountTokensRequest( + countTokensRequest: CountTokensRequest, + model: string +): GoogleAICountTokensRequest { + const mappedCountTokensRequest: GoogleAICountTokensRequest = { + generateContentRequest: { + model, + contents: countTokensRequest.contents, + systemInstruction: countTokensRequest.systemInstruction, + tools: countTokensRequest.tools, + generationConfig: countTokensRequest.generationConfig + } + }; + + return mappedCountTokensRequest; +} + +/** + * Maps a Google AI {@link GoogleAIGenerateContentCandidate} to a format that conforms + * to the Vertex AI API format. + * + * @param candidates The {@link GoogleAIGenerateContentCandidate} to map. + * @returns A {@link GenerateContentCandidate} that conforms to the Vertex AI format. + * + * @throws If any {@link Part} in the candidates has a `videoMetadata` property. + * + * @internal + */ +export function mapGenerateContentCandidates( + candidates: GoogleAIGenerateContentCandidate[] +): GenerateContentCandidate[] { + const mappedCandidates: GenerateContentCandidate[] = []; + let mappedSafetyRatings: SafetyRating[]; + if (mappedCandidates) { + candidates.forEach(candidate => { + // Map citationSources to citations. + let citationMetadata: CitationMetadata | undefined; + if (candidate.citationMetadata) { + citationMetadata = { + citations: candidate.citationMetadata.citationSources + }; + } + + // Assign missing candidate SafetyRatings properties to their defaults. + if (candidate.safetyRatings) { + logger.warn( + "Candidate safety rating properties 'severity', 'severityScore', and 'probabilityScore' are not included in responses from Google AI. Properties have been assigned to default values." 
+ ); + mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { + return { + ...safetyRating, + severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: 0, + severityScore: 0 + }; + }); + } + + // videoMetadata is not supported. + // Throw early since developers may send a long video as input and only expect to pay + // for inference on a small portion of the video. + if ( + candidate.content?.parts.some( + part => (part as InlineDataPart)?.videoMetadata + ) + ) { + throw new GenAIError( + GenAIErrorCode.UNSUPPORTED, + 'Part.videoMetadata is not supported in Google AI. Please remove this property.' + ); + } + + const mappedCandidate = { + index: candidate.index, + content: candidate.content, + finishReason: candidate.finishReason, + finishMessage: candidate.finishMessage, + safetyRatings: mappedSafetyRatings, + citationMetadata, + groundingMetadata: candidate.groundingMetadata + }; + mappedCandidates.push(mappedCandidate); + }); + } + + return mappedCandidates; +} + +export function mapPromptFeedback( + promptFeedback: PromptFeedback +): PromptFeedback { + // Assign missing PromptFeedback SafetyRatings properties to their defaults. + const mappedSafetyRatings: SafetyRating[] = []; + promptFeedback.safetyRatings.forEach(safetyRating => { + mappedSafetyRatings.push({ + category: safetyRating.category, + probability: safetyRating.probability, + severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: 0, + severityScore: 0, + blocked: safetyRating.blocked + }); + }); + logger.warn( + "PromptFeedback safety ratings' properties severity, severityScore, and probabilityScore are not included in responses from Google AI. Properties have been assigned to default values." + ); + + const mappedPromptFeedback: PromptFeedback = { + blockReason: promptFeedback.blockReason, + safetyRatings: mappedSafetyRatings, + blockReasonMessage: promptFeedback.blockReasonMessage + }; + return mappedPromptFeedback; +} diff --git a/packages/vertexai/src/helpers.test.ts b/packages/vertexai/src/helpers.test.ts new file mode 100644 index 00000000000..f7316e3f119 --- /dev/null +++ b/packages/vertexai/src/helpers.test.ts @@ -0,0 +1,140 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import { expect } from 'chai'; +import { GENAI_TYPE } from './constants'; +import { encodeInstanceIdentifier, decodeInstanceIdentifier } from './helpers'; +import { GenAIError } from './errors'; +import { BackendType } from './public-types'; +import { InstanceIdentifier } from './types/internal'; +import { GenAIErrorCode } from './types'; + +describe('Identifier Encoding/Decoding', () => { + describe('encodeInstanceIdentifier', () => { + it('should encode Vertex AI identifier with a specific location', () => { + const identifier: InstanceIdentifier = { + backendType: BackendType.VERTEX_AI, + location: 'us-central1' + }; + console.log(identifier); + const expected = `${GENAI_TYPE}/vertexai/us-central1`; + expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + }); + + it('should encode Vertex AI identifier using empty location', () => { + const identifier: InstanceIdentifier = { + backendType: BackendType.VERTEX_AI, + location: '' + }; + const expected = `${GENAI_TYPE}/vertexai/`; + expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + }); + + it('should encode Google AI identifier', () => { + const identifier: InstanceIdentifier = { + backendType: BackendType.GOOGLE_AI + }; + const expected = `${GENAI_TYPE}/googleai`; + expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + }); + + it('should throw GenAIError for unknown backend type', () => { + const identifier = { + backendType: 'some-future-backend' + } as any; // bypass type checking for the test + + expect(() => encodeInstanceIdentifier(identifier)).to.throw(GenAIError); + + try { + encodeInstanceIdentifier(identifier); + expect.fail('Expected encodeInstanceIdentifier to throw'); + } catch (e) { + expect(e).to.be.instanceOf(GenAIError); + const error = e as GenAIError; + expect(error.message).to.contain(`Unknown backend`); + expect(error.code).to.equal(GenAIErrorCode.ERROR); + } + }); + }); + + describe('decodeInstanceIdentifier', () => { + it('should decode Vertex AI identifier with location', () => { + const encoded = `${GENAI_TYPE}/vertexai/europe-west1`; + const expected: InstanceIdentifier = { + backendType: BackendType.VERTEX_AI, + location: 'europe-west1' + }; + expect(decodeInstanceIdentifier(encoded)).to.deep.equal(expected); + }); + + it('should throw an error if Vertex AI identifier string without explicit location part', () => { + const encoded = `${GENAI_TYPE}/vertexai`; + expect(() => decodeInstanceIdentifier(encoded)).to.throw(GenAIError); + + try { + decodeInstanceIdentifier(encoded); + expect.fail('Expected encodeInstanceIdentifier to throw'); + } catch (e) { + expect(e).to.be.instanceOf(GenAIError); + const error = e as GenAIError; + expect(error.message).to.contain( + `Invalid instance identifier, unknown location` + ); + expect(error.code).to.equal(GenAIErrorCode.ERROR); + } + }); + + it('should decode Google AI identifier', () => { + const encoded = `${GENAI_TYPE}/googleai`; + const expected: InstanceIdentifier = { + backendType: BackendType.GOOGLE_AI + }; + expect(decodeInstanceIdentifier(encoded)).to.deep.equal(expected); + }); + + it('should throw GenAIError for invalid backend string', () => { + const encoded = `${GENAI_TYPE}/someotherbackend/location`; + expect(() => decodeInstanceIdentifier(encoded)).to.throw( + GenAIError, + `Invalid instance identifier string: '${encoded}'` + ); + try { + decodeInstanceIdentifier(encoded); + expect.fail('Expected decodeInstanceIdentifier to throw'); + } catch (e) { + expect(e).to.be.instanceOf(GenAIError); + expect((e as 
GenAIError).code).to.equal(GenAIErrorCode.ERROR); + } + }); + + it('should throw GenAIError for malformed identifier string (too few parts)', () => { + const encoded = GENAI_TYPE; + expect(() => decodeInstanceIdentifier(encoded)).to.throw( + GenAIError, + `Invalid instance identifier string: '${encoded}'` + ); + }); + + it('should throw GenAIError for malformed identifier string (incorrect prefix)', () => { + const encoded = 'firebase/vertexai/location'; + // This will also hit the default case in the switch statement + expect(() => decodeInstanceIdentifier(encoded)).to.throw( + GenAIError, + `Invalid instance identifier, unknown prefix 'firebase'` + ); + }); + }); +}); diff --git a/packages/vertexai/src/helpers.ts b/packages/vertexai/src/helpers.ts new file mode 100644 index 00000000000..28f11a4b2bd --- /dev/null +++ b/packages/vertexai/src/helpers.ts @@ -0,0 +1,86 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { GENAI_TYPE } from './constants'; +import { GenAIError } from './errors'; +import { BackendType } from './public-types'; +import { InstanceIdentifier } from './types/internal'; +import { GenAIErrorCode } from './types'; + +/** + * Encodes an {@link InstanceIdentifier} into a string. + * + * This string is used to identify unique {@link GenAI} instances by backend type. + * + * @internal + */ +export function encodeInstanceIdentifier( + instanceIdentifier: InstanceIdentifier +): string { + switch (instanceIdentifier.backendType) { + case BackendType.VERTEX_AI: + return `${GENAI_TYPE}/vertexai/${instanceIdentifier.location}`; + case BackendType.GOOGLE_AI: + return `${GENAI_TYPE}/googleai`; + default: + throw new GenAIError( + GenAIErrorCode.ERROR, + `Unknown backend '${instanceIdentifier}'` + ); + } +} + +/** + * Decodes an instance identifier string into an {@link InstanceIdentifier}. 
+ * + * @internal + */ +export function decodeInstanceIdentifier( + instanceIdentifier: string +): InstanceIdentifier { + const identifierParts = instanceIdentifier.split('/'); + if (identifierParts[0] !== GENAI_TYPE) { + throw new GenAIError( + GenAIErrorCode.ERROR, + `Invalid instance identifier, unknown prefix '${identifierParts[0]}'` + ); + } + const backend = identifierParts[1]; + switch (backend) { + case 'vertexai': + const location: string | undefined = identifierParts[2]; + if (!location) { + throw new GenAIError( + GenAIErrorCode.ERROR, + `Invalid instance identifier, unknown location '${instanceIdentifier}'` + ); + } + return { + backendType: BackendType.VERTEX_AI, + location + }; + case 'googleai': + return { + backendType: BackendType.GOOGLE_AI + }; + default: + throw new GenAIError( + GenAIErrorCode.ERROR, + `Invalid instance identifier string: '${instanceIdentifier}'` + ); + } +} diff --git a/packages/vertexai/src/index.node.ts b/packages/vertexai/src/index.node.ts index 6a18788141a..414aefdaffb 100644 --- a/packages/vertexai/src/index.node.ts +++ b/packages/vertexai/src/index.node.ts @@ -22,21 +22,35 @@ */ import { registerVersion, _registerComponent } from '@firebase/app'; -import { VertexAIService } from './service'; -import { VERTEX_TYPE } from './constants'; +import { GenAIService } from './service'; +import { DEFAULT_INSTANCE_IDENTIFER, GENAI_TYPE } from './constants'; import { Component, ComponentType } from '@firebase/component'; import { name, version } from '../package.json'; +import { InstanceIdentifier } from './types/internal'; +import { decodeInstanceIdentifier } from './helpers'; -function registerVertex(): void { +function registerGenAI(): void { _registerComponent( new Component( - VERTEX_TYPE, - (container, { instanceIdentifier: location }) => { + GENAI_TYPE, + (container, options) => { // getImmediate for FirebaseApp will always succeed const app = container.getProvider('app').getImmediate(); const auth = container.getProvider('auth-internal'); const appCheckProvider = container.getProvider('app-check-internal'); - return new VertexAIService(app, auth, appCheckProvider, { location }); + + let instanceIdentifier: InstanceIdentifier; + if (options.instanceIdentifier) { + instanceIdentifier = decodeInstanceIdentifier( + options.instanceIdentifier + ); + } else { + instanceIdentifier = DEFAULT_INSTANCE_IDENTIFER; + } + + const backend = instanceIdentifier; + + return new GenAIService(app, backend, auth, appCheckProvider); }, ComponentType.PUBLIC ).setMultipleInstances(true) @@ -47,7 +61,7 @@ function registerVertex(): void { registerVersion(name, version, '__BUILD_TARGET__'); } -registerVertex(); +registerGenAI(); export * from './api'; export * from './public-types'; diff --git a/packages/vertexai/src/index.ts b/packages/vertexai/src/index.ts index 5d646e8d9d0..985469c5ce1 100644 --- a/packages/vertexai/src/index.ts +++ b/packages/vertexai/src/index.ts @@ -6,7 +6,7 @@ /** * @license - * Copyright 2024 Google LLC + * Copyright 2025 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -22,10 +22,13 @@ */ import { registerVersion, _registerComponent } from '@firebase/app'; -import { VertexAIService } from './service'; -import { VERTEX_TYPE } from './constants'; +import { GenAIService } from './service'; +import { GENAI_TYPE } from './constants'; import { Component, ComponentType } from '@firebase/component'; import { name, version } from '../package.json'; +import { decodeInstanceIdentifier } from './helpers'; +import { GenAIError } from './api'; +import { GenAIErrorCode } from './types'; declare global { interface Window { @@ -33,16 +36,24 @@ declare global { } } -function registerVertex(): void { +function registerGenAI(): void { _registerComponent( new Component( - VERTEX_TYPE, - (container, { instanceIdentifier: location }) => { + GENAI_TYPE, + (container, { instanceIdentifier }) => { + if (!instanceIdentifier) { + throw new GenAIError( + GenAIErrorCode.ERROR, + 'GenAIService instance identifier is undefined.' + ); + } + + const backend = decodeInstanceIdentifier(instanceIdentifier); // getImmediate for FirebaseApp will always succeed const app = container.getProvider('app').getImmediate(); const auth = container.getProvider('auth-internal'); const appCheckProvider = container.getProvider('app-check-internal'); - return new VertexAIService(app, auth, appCheckProvider, { location }); + return new GenAIService(app, backend, auth, appCheckProvider); }, ComponentType.PUBLIC ).setMultipleInstances(true) @@ -53,7 +64,7 @@ function registerVertex(): void { registerVersion(name, version, '__BUILD_TARGET__'); } -registerVertex(); +registerGenAI(); export * from './api'; export * from './public-types'; diff --git a/packages/vertexai/src/methods/chat-session-helpers.ts b/packages/vertexai/src/methods/chat-session-helpers.ts index 899db4f626a..a1a1713f584 100644 --- a/packages/vertexai/src/methods/chat-session-helpers.ts +++ b/packages/vertexai/src/methods/chat-session-helpers.ts @@ -15,14 +15,8 @@ * limitations under the License. */ -import { - Content, - POSSIBLE_ROLES, - Part, - Role, - VertexAIErrorCode -} from '../types'; -import { VertexAIError } from '../errors'; +import { Content, POSSIBLE_ROLES, Part, Role, GenAIErrorCode } from '../types'; +import { GenAIError } from '../errors'; // https://ai.google.dev/api/rest/v1beta/Content#part @@ -54,14 +48,14 @@ export function validateChatHistory(history: Content[]): void { for (const currContent of history) { const { role, parts } = currContent; if (!prevContent && role !== 'user') { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}` ); } if (!POSSIBLE_ROLES.includes(role)) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `Each item should include role field. 
Got ${role} but valid roles are: ${JSON.stringify( POSSIBLE_ROLES )}` @@ -69,15 +63,15 @@ export function validateChatHistory(history: Content[]): void { } if (!Array.isArray(parts)) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `Content should have 'parts' but property with an array of Parts` ); } if (parts.length === 0) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `Each Content should have at least one part` ); } @@ -99,8 +93,8 @@ export function validateChatHistory(history: Content[]): void { const validParts = VALID_PARTS_PER_ROLE[role]; for (const key of VALID_PART_FIELDS) { if (!validParts.includes(key) && countFields[key] > 0) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part` ); } @@ -109,8 +103,8 @@ export function validateChatHistory(history: Content[]): void { if (prevContent) { const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; if (!validPreviousContentRoles.includes(prevContent.role)) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, `Content with role '${role} can't follow '${ prevContent.role }'. Valid previous roles: ${JSON.stringify( diff --git a/packages/vertexai/src/methods/chat-session.test.ts b/packages/vertexai/src/methods/chat-session.test.ts index bd389a3d778..cbfcd22e3e0 100644 --- a/packages/vertexai/src/methods/chat-session.test.ts +++ b/packages/vertexai/src/methods/chat-session.test.ts @@ -23,6 +23,7 @@ import * as generateContentMethods from './generate-content'; import { GenerateContentStreamResult } from '../types'; import { ChatSession } from './chat-session'; import { ApiSettings } from '../types/internal'; +import { vertexAIBackend } from '../api'; use(sinonChai); use(chaiAsPromised); @@ -31,7 +32,8 @@ const fakeApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', - location: 'us-central1' + location: 'us-central1', + backend: vertexAIBackend() }; describe('ChatSession', () => { diff --git a/packages/vertexai/src/methods/count-tokens.test.ts b/packages/vertexai/src/methods/count-tokens.test.ts index 9eccbf702fe..bdf99711f55 100644 --- a/packages/vertexai/src/methods/count-tokens.test.ts +++ b/packages/vertexai/src/methods/count-tokens.test.ts @@ -16,7 +16,7 @@ */ import { expect, use } from 'chai'; -import { match, restore, stub } from 'sinon'; +import Sinon, { match, restore, stub } from 'sinon'; import sinonChai from 'sinon-chai'; import chaiAsPromised from 'chai-as-promised'; import { getMockResponse } from '../../test-utils/mock-response'; @@ -25,6 +25,8 @@ import { countTokens } from './count-tokens'; import { CountTokensRequest } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; +import { googleAIBackend, vertexAIBackend } from '../api'; +import { mapCountTokensRequest } from '../googleAIMappers'; use(sinonChai); use(chaiAsPromised); @@ -33,7 +35,16 @@ const fakeApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', - location: 'us-central1' + location: 'us-central1', + backend: vertexAIBackend() +}; + +const fakeGoogleAIApiSettings: ApiSettings = { + apiKey: 'key', + project: 'my-project', + appId: 'my-appid', + location: 'us-central1', + backend: 
googleAIBackend() }; const fakeRequestParams: CountTokensRequest = { @@ -139,4 +150,30 @@ describe('countTokens()', () => { ).to.be.rejectedWith(/404.*not found/); expect(mockFetch).to.be.called; }); + describe('googleAI', () => { + let makeRequestStub: Sinon.SinonStub; + + beforeEach(() => { + makeRequestStub = stub(request, 'makeRequest'); + }); + + afterEach(() => { + restore(); + }); + + it('maps request to GoogleAI format', async () => { + makeRequestStub.resolves({ ok: true, json: () => {} } as Response); // Unused + + await countTokens(fakeGoogleAIApiSettings, 'model', fakeRequestParams); + + expect(makeRequestStub).to.be.calledWith( + 'model', + Task.COUNT_TOKENS, + fakeGoogleAIApiSettings, + false, + JSON.stringify(mapCountTokensRequest(fakeRequestParams, 'model')), + undefined + ); + }); + }); }); diff --git a/packages/vertexai/src/methods/count-tokens.ts b/packages/vertexai/src/methods/count-tokens.ts index c9d43a5b6fd..3a94f181126 100644 --- a/packages/vertexai/src/methods/count-tokens.ts +++ b/packages/vertexai/src/methods/count-tokens.ts @@ -22,6 +22,8 @@ import { } from '../types'; import { Task, makeRequest } from '../requests/request'; import { ApiSettings } from '../types/internal'; +import * as GoogleAIMapper from '../googleAIMappers'; +import { BackendType } from '../public-types'; export async function countTokens( apiSettings: ApiSettings, @@ -29,12 +31,19 @@ export async function countTokens( params: CountTokensRequest, requestOptions?: RequestOptions ): Promise { + let body: string = ''; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + const mappedParams = GoogleAIMapper.mapCountTokensRequest(params, model); + body = JSON.stringify(mappedParams); + } else { + body = JSON.stringify(params); + } const response = await makeRequest( model, Task.COUNT_TOKENS, apiSettings, false, - JSON.stringify(params), + body, requestOptions ); return response.json(); diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index 1d15632f828..181d7d45b84 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -16,13 +16,14 @@ */ import { expect, use } from 'chai'; -import { match, restore, stub } from 'sinon'; +import Sinon, { match, restore, stub } from 'sinon'; import sinonChai from 'sinon-chai'; import chaiAsPromised from 'chai-as-promised'; import { getMockResponse } from '../../test-utils/mock-response'; import * as request from '../requests/request'; import { generateContent } from './generate-content'; import { + GenAIErrorCode, GenerateContentRequest, HarmBlockMethod, HarmBlockThreshold, @@ -30,6 +31,8 @@ import { } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; +import { GenAIError, googleAIBackend, vertexAIBackend } from '../api'; +import { mapGenerateContentRequest } from '../googleAIMappers'; use(sinonChai); use(chaiAsPromised); @@ -38,7 +41,16 @@ const fakeApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', - location: 'us-central1' + location: 'us-central1', + backend: vertexAIBackend() +}; + +const fakeGoogleAIApiSettings: ApiSettings = { + apiKey: 'key', + project: 'my-project', + appId: 'my-appid', + location: 'us-central1', + backend: googleAIBackend() }; const fakeRequestParams: GenerateContentRequest = { @@ -55,6 +67,19 @@ const fakeRequestParams: GenerateContentRequest = { ] }; +const 
fakeGoogleAIRequestParams: GenerateContentRequest = { + contents: [{ parts: [{ text: 'hello' }], role: 'user' }], + generationConfig: { + topK: 16 + }, + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ] +}; + describe('generateContent()', () => { afterEach(() => { restore(); @@ -78,9 +103,7 @@ describe('generateContent()', () => { Task.GENERATE_CONTENT, fakeApiSettings, false, - match((value: string) => { - return value.includes('contents'); - }), + JSON.stringify(fakeRequestParams), undefined ); }); @@ -289,4 +312,66 @@ describe('generateContent()', () => { ); expect(mockFetch).to.be.called; }); + describe('googleAI', () => { + let makeRequestStub: Sinon.SinonStub; + + beforeEach(() => { + makeRequestStub = stub(request, 'makeRequest'); + }); + + afterEach(() => { + restore(); + }); + + it('throws error when method is defined', async () => { + const mockResponse = getMockResponse( + 'googleAI', + 'unary-success-basic-reply-short.txt' + ); + makeRequestStub.resolves(mockResponse as Response); + + const requestParamsWithMethod: GenerateContentRequest = { + contents: [{ parts: [{ text: 'hello' }], role: 'user' }], + safetySettings: [ + { + category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, + method: HarmBlockMethod.SEVERITY // Unsupported in Google AI. + } + ] + }; + + // Expect generateContent to throw a GenAIError that method is not supported. + await expect( + generateContent( + fakeGoogleAIApiSettings, + 'model', + requestParamsWithMethod + ) + ).to.be.rejectedWith(GenAIError, GenAIErrorCode.UNSUPPORTED); + expect(makeRequestStub).to.not.be.called; + }); + it('maps request to GoogleAI format', async () => { + const mockResponse = getMockResponse( + 'googleAI', + 'unary-success-basic-reply-short.txt' + ); + makeRequestStub.resolves(mockResponse as Response); + + await generateContent( + fakeGoogleAIApiSettings, + 'model', + fakeGoogleAIRequestParams + ); + + expect(makeRequestStub).to.be.calledWith( + 'model', + Task.GENERATE_CONTENT, + fakeGoogleAIApiSettings, + false, + JSON.stringify(mapGenerateContentRequest(fakeGoogleAIRequestParams)), + undefined + ); + }); + }); }); diff --git a/packages/vertexai/src/methods/generate-content.ts b/packages/vertexai/src/methods/generate-content.ts index 0944b38016a..f05ca41c0bc 100644 --- a/packages/vertexai/src/methods/generate-content.ts +++ b/packages/vertexai/src/methods/generate-content.ts @@ -26,6 +26,8 @@ import { Task, makeRequest } from '../requests/request'; import { createEnhancedContentResponse } from '../requests/response-helpers'; import { processStream } from '../requests/stream-reader'; import { ApiSettings } from '../types/internal'; +import * as GoogleAIMapper from '../googleAIMappers'; +import { BackendType } from '../public-types'; export async function generateContentStream( apiSettings: ApiSettings, @@ -33,6 +35,9 @@ export async function generateContentStream( params: GenerateContentRequest, requestOptions?: RequestOptions ): Promise { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = GoogleAIMapper.mapGenerateContentRequest(params); + } const response = await makeRequest( model, Task.STREAM_GENERATE_CONTENT, @@ -41,7 +46,7 @@ export async function generateContentStream( JSON.stringify(params), requestOptions ); - return processStream(response); + return processStream(response, apiSettings); // TODO: Map streaming responses } export async function 
generateContent( @@ -50,6 +55,9 @@ export async function generateContent( params: GenerateContentRequest, requestOptions?: RequestOptions ): Promise { + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + params = GoogleAIMapper.mapGenerateContentRequest(params); + } const response = await makeRequest( model, Task.GENERATE_CONTENT, @@ -58,9 +66,26 @@ export async function generateContent( JSON.stringify(params), requestOptions ); - const responseJson: GenerateContentResponse = await response.json(); - const enhancedResponse = createEnhancedContentResponse(responseJson); + const generateContentResponse = await handleGenerateContentResponse( + response, + apiSettings + ); + const enhancedResponse = createEnhancedContentResponse( + generateContentResponse + ); return { response: enhancedResponse }; } + +async function handleGenerateContentResponse( + response: Response, + apiSettings: ApiSettings +): Promise { + const responseJson = await response.json(); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return GoogleAIMapper.mapGenerateContentResponse(responseJson); + } else { + return responseJson; + } +} diff --git a/packages/vertexai/src/models/vertexai-model.test.ts b/packages/vertexai/src/models/genai-model.test.ts similarity index 65% rename from packages/vertexai/src/models/vertexai-model.test.ts rename to packages/vertexai/src/models/genai-model.test.ts index 7aa7f806e7f..16bd54d9f6f 100644 --- a/packages/vertexai/src/models/vertexai-model.test.ts +++ b/packages/vertexai/src/models/genai-model.test.ts @@ -15,24 +15,25 @@ * limitations under the License. */ import { use, expect } from 'chai'; -import { VertexAI, VertexAIErrorCode } from '../public-types'; +import { GenAI, GenAIErrorCode } from '../public-types'; import sinonChai from 'sinon-chai'; -import { VertexAIModel } from './vertexai-model'; -import { VertexAIError } from '../errors'; +import { GenAIModel } from './genai-model'; +import { GenAIError } from '../errors'; +import { vertexAIBackend } from '../api'; use(sinonChai); /** - * A class that extends VertexAIModel that allows us to test the protected constructor. + * A class that extends GenAIModel that allows us to test the protected constructor. 
*/ -class TestModel extends VertexAIModel { +class TestModel extends GenAIModel { /* eslint-disable @typescript-eslint/no-useless-constructor */ - constructor(vertexAI: VertexAI, modelName: string) { - super(vertexAI, modelName); + constructor(genAI: GenAI, modelName: string) { + super(genAI, modelName); } } -const fakeVertexAI: VertexAI = { +const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -42,31 +43,32 @@ const fakeVertexAI: VertexAI = { appId: 'my-appid' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; -describe('VertexAIModel', () => { +describe('GenAIModel', () => { it('handles plain model name', () => { - const testModel = new TestModel(fakeVertexAI, 'my-model'); + const testModel = new TestModel(fakeGenAI, 'my-model'); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles models/ prefixed model name', () => { - const testModel = new TestModel(fakeVertexAI, 'models/my-model'); + const testModel = new TestModel(fakeGenAI, 'models/my-model'); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles full model name', () => { const testModel = new TestModel( - fakeVertexAI, + fakeGenAI, 'publishers/google/models/my-model' ); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles prefixed tuned model name', () => { - const testModel = new TestModel(fakeVertexAI, 'tunedModels/my-model'); + const testModel = new TestModel(fakeGenAI, 'tunedModels/my-model'); expect(testModel.model).to.equal('tunedModels/my-model'); }); it('throws if not passed an api key', () => { - const fakeVertexAI: VertexAI = { + const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -74,16 +76,17 @@ describe('VertexAIModel', () => { projectId: 'my-project' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; try { - new TestModel(fakeVertexAI, 'my-model'); + new TestModel(fakeGenAI, 'my-model'); } catch (e) { - expect((e as VertexAIError).code).to.equal(VertexAIErrorCode.NO_API_KEY); + expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_API_KEY); } }); it('throws if not passed a project ID', () => { - const fakeVertexAI: VertexAI = { + const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -91,18 +94,17 @@ describe('VertexAIModel', () => { apiKey: 'key' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; try { - new TestModel(fakeVertexAI, 'my-model'); + new TestModel(fakeGenAI, 'my-model'); } catch (e) { - expect((e as VertexAIError).code).to.equal( - VertexAIErrorCode.NO_PROJECT_ID - ); + expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_PROJECT_ID); } }); it('throws if not passed an app ID', () => { - const fakeVertexAI: VertexAI = { + const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -111,12 +113,13 @@ describe('VertexAIModel', () => { projectId: 'my-project' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; try { - new TestModel(fakeVertexAI, 'my-model'); + new TestModel(fakeGenAI, 'my-model'); } catch (e) { - expect((e as VertexAIError).code).to.equal(VertexAIErrorCode.NO_APP_ID); + expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_APP_ID); } }); }); diff --git a/packages/vertexai/src/models/vertexai-model.ts b/packages/vertexai/src/models/genai-model.ts similarity index 61% rename from packages/vertexai/src/models/vertexai-model.ts rename to 
packages/vertexai/src/models/genai-model.ts index cac14845961..28e67c8552d 100644 --- a/packages/vertexai/src/models/vertexai-model.ts +++ b/packages/vertexai/src/models/genai-model.ts @@ -15,9 +15,9 @@ * limitations under the License. */ -import { VertexAIError } from '../errors'; -import { VertexAI, VertexAIErrorCode } from '../public-types'; -import { VertexAIService } from '../service'; +import { GenAIError } from '../errors'; +import { GenAIErrorCode, GenAI, BackendType } from '../public-types'; +import { GenAIService } from '../service'; import { ApiSettings } from '../types/internal'; import { _isFirebaseServerApp } from '@firebase/app'; @@ -26,7 +26,7 @@ import { _isFirebaseServerApp } from '@firebase/app'; * * @public */ -export abstract class VertexAIModel { +export abstract class GenAIModel { /** * The fully qualified model resource name to use for generating images * (for example, `publishers/google/models/imagen-3.0-generate-002`). @@ -39,12 +39,12 @@ export abstract class VertexAIModel { protected _apiSettings: ApiSettings; /** - * Constructs a new instance of the {@link VertexAIModel} class. + * Constructs a new instance of the {@link GenAIModel} class. * * This constructor should only be called from subclasses that provide * a model API. * - * @param vertexAI - An instance of the Vertex AI in Firebase SDK. + * @param genAI - A {@link GenAI} instance. * @param modelName - The name of the model being used. It can be in one of the following formats: * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) * - `models/my-model` (will resolve to `publishers/google/models/my-model`) @@ -55,51 +55,52 @@ export abstract class VertexAIModel { * * @internal */ - protected constructor(vertexAI: VertexAI, modelName: string) { - this.model = VertexAIModel.normalizeModelName(modelName); - - if (!vertexAI.app?.options?.apiKey) { - throw new VertexAIError( - VertexAIErrorCode.NO_API_KEY, + protected constructor(genAI: GenAI, modelName: string) { + if (!genAI.app?.options?.apiKey) { + throw new GenAIError( + GenAIErrorCode.NO_API_KEY, `The "apiKey" field is empty in the local Firebase config. Firebase VertexAI requires this field to contain a valid API key.` ); - } else if (!vertexAI.app?.options?.projectId) { - throw new VertexAIError( - VertexAIErrorCode.NO_PROJECT_ID, + } else if (!genAI.app?.options?.projectId) { + throw new GenAIError( + GenAIErrorCode.NO_PROJECT_ID, `The "projectId" field is empty in the local Firebase config. Firebase VertexAI requires this field to contain a valid project ID.` ); - } else if (!vertexAI.app?.options?.appId) { - throw new VertexAIError( - VertexAIErrorCode.NO_APP_ID, + } else if (!genAI.app?.options?.appId) { + throw new GenAIError( + GenAIErrorCode.NO_APP_ID, `The "appId" field is empty in the local Firebase config. 
Firebase VertexAI requires this field to contain a valid app ID.` ); } else { this._apiSettings = { - apiKey: vertexAI.app.options.apiKey, - project: vertexAI.app.options.projectId, - appId: vertexAI.app.options.appId, + apiKey: genAI.app.options.apiKey, + project: genAI.app.options.projectId, + appId: genAI.app.options.appId, automaticDataCollectionEnabled: - vertexAI.app.automaticDataCollectionEnabled, - location: vertexAI.location + genAI.app.automaticDataCollectionEnabled, + location: genAI.location, + backend: genAI.backend }; - if ( - _isFirebaseServerApp(vertexAI.app) && - vertexAI.app.settings.appCheckToken - ) { - const token = vertexAI.app.settings.appCheckToken; + if (_isFirebaseServerApp(genAI.app) && genAI.app.settings.appCheckToken) { + const token = genAI.app.settings.appCheckToken; this._apiSettings.getAppCheckToken = () => { return Promise.resolve({ token }); }; - } else if ((vertexAI as VertexAIService).appCheck) { + } else if ((genAI as GenAIService).appCheck) { this._apiSettings.getAppCheckToken = () => - (vertexAI as VertexAIService).appCheck!.getToken(); + (genAI as GenAIService).appCheck!.getToken(); } - if ((vertexAI as VertexAIService).auth) { + if ((genAI as GenAIService).auth) { this._apiSettings.getAuthToken = () => - (vertexAI as VertexAIService).auth!.getToken(); + (genAI as GenAIService).auth!.getToken(); } + + this.model = GenAIModel.normalizeModelName( + modelName, + this._apiSettings.backend.backendType + ); } } @@ -108,8 +109,31 @@ export abstract class VertexAIModel { * * @param modelName - The model name to normalize. * @returns The fully qualified model resource name. + * + * @internal + */ + static normalizeModelName( + modelName: string, + backendType: BackendType + ): string { + if (backendType === BackendType.GOOGLE_AI) { + return GenAIModel.normalizeGoogleAIModelName(modelName); + } else { + return GenAIModel.normalizeVertexAIModelName(modelName); + } + } + + /** + * @internal + */ + private static normalizeGoogleAIModelName(modelName: string): string { + return `models/${modelName}`; + } + + /** + * @internal */ - static normalizeModelName(modelName: string): string { + private static normalizeVertexAIModelName(modelName: string): string { let model: string; if (modelName.includes('/')) { if (modelName.startsWith('models/')) { diff --git a/packages/vertexai/src/models/generative-model.test.ts b/packages/vertexai/src/models/generative-model.test.ts index 987f9b115e2..a8245fe16ef 100644 --- a/packages/vertexai/src/models/generative-model.test.ts +++ b/packages/vertexai/src/models/generative-model.test.ts @@ -16,15 +16,16 @@ */ import { use, expect } from 'chai'; import { GenerativeModel } from './generative-model'; -import { FunctionCallingMode, VertexAI } from '../public-types'; +import { FunctionCallingMode, GenAI } from '../public-types'; import * as request from '../requests/request'; import { match, restore, stub } from 'sinon'; import { getMockResponse } from '../../test-utils/mock-response'; import sinonChai from 'sinon-chai'; +import { vertexAIBackend } from '../api'; use(sinonChai); -const fakeVertexAI: VertexAI = { +const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -34,12 +35,13 @@ const fakeVertexAI: VertexAI = { appId: 'my-appid' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; describe('GenerativeModel', () => { it('passes params through to generateContent', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new 
GenerativeModel(fakeGenAI, { model: 'my-model', tools: [ { @@ -84,7 +86,7 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to generateContent', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model', systemInstruction: 'be friendly' }); @@ -110,7 +112,7 @@ describe('GenerativeModel', () => { restore(); }); it('generateContent overrides model values', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model', tools: [ { @@ -166,7 +168,7 @@ describe('GenerativeModel', () => { restore(); }); it('passes params through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model', tools: [ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } @@ -204,7 +206,7 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model', systemInstruction: 'be friendly' }); @@ -230,7 +232,7 @@ describe('GenerativeModel', () => { restore(); }); it('startChat overrides model values', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model', tools: [ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } @@ -282,7 +284,7 @@ describe('GenerativeModel', () => { restore(); }); it('calls countTokens', async () => { - const genModel = new GenerativeModel(fakeVertexAI, { model: 'my-model' }); + const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model' }); const mockResponse = getMockResponse( 'vertexAI', 'unary-success-total-tokens.json' diff --git a/packages/vertexai/src/models/generative-model.ts b/packages/vertexai/src/models/generative-model.ts index 983118bf6ff..37880a794b7 100644 --- a/packages/vertexai/src/models/generative-model.ts +++ b/packages/vertexai/src/models/generative-model.ts @@ -41,14 +41,14 @@ import { formatGenerateContentInput, formatSystemInstruction } from '../requests/request-helpers'; -import { VertexAI } from '../public-types'; -import { VertexAIModel } from './vertexai-model'; +import { GenAI } from '../public-types'; +import { GenAIModel } from './genai-model'; /** * Class for generative model APIs. 
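As a quick reference for the backend-aware normalization added in `genai-model.ts` above, a sketch of the values `GenAIModel.normalizeModelName` is expected to return, consistent with the unit tests earlier in this patch:

```ts
import { GenAIModel } from './genai-model';
import { BackendType } from '../public-types';

// Vertex AI backend: short names resolve under the Google publisher path,
// and tuned model names pass through unchanged.
GenAIModel.normalizeModelName('my-model', BackendType.VERTEX_AI);
// -> 'publishers/google/models/my-model'
GenAIModel.normalizeModelName('tunedModels/my-model', BackendType.VERTEX_AI);
// -> 'tunedModels/my-model'

// Google AI backend: names are simply prefixed with 'models/'.
GenAIModel.normalizeModelName('my-model', BackendType.GOOGLE_AI);
// -> 'models/my-model'
```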
* @public */ -export class GenerativeModel extends VertexAIModel { +export class GenerativeModel extends GenAIModel { generationConfig: GenerationConfig; safetySettings: SafetySetting[]; requestOptions?: RequestOptions; @@ -57,11 +57,11 @@ export class GenerativeModel extends VertexAIModel { systemInstruction?: Content; constructor( - vertexAI: VertexAI, + genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions ) { - super(vertexAI, modelParams.model); + super(genAI, modelParams.model); this.generationConfig = modelParams.generationConfig || {}; this.safetySettings = modelParams.safetySettings || []; this.tools = modelParams.tools; diff --git a/packages/vertexai/src/models/imagen-model.test.ts b/packages/vertexai/src/models/imagen-model.test.ts index 9e534f2195a..e2a426a2416 100644 --- a/packages/vertexai/src/models/imagen-model.test.ts +++ b/packages/vertexai/src/models/imagen-model.test.ts @@ -20,18 +20,19 @@ import { ImagenAspectRatio, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, - VertexAI, - VertexAIErrorCode + GenAI, + GenAIErrorCode } from '../public-types'; import * as request from '../requests/request'; import sinonChai from 'sinon-chai'; -import { VertexAIError } from '../errors'; +import { GenAIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; import { match, restore, stub } from 'sinon'; +import { vertexAIBackend } from '../api'; use(sinonChai); -const fakeVertexAI: VertexAI = { +const fakeGenAI: GenAI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -41,6 +42,7 @@ const fakeVertexAI: VertexAI = { appId: 'my-appid' } }, + backend: vertexAIBackend('us-central1'), location: 'us-central1' }; @@ -54,7 +56,7 @@ describe('ImagenModel', () => { mockResponse as Response ); - const imagenModel = new ImagenModel(fakeVertexAI, { + const imagenModel = new ImagenModel(fakeGenAI, { model: 'my-model' }); const prompt = 'A photorealistic image of a toy boat at sea.'; @@ -75,7 +77,7 @@ describe('ImagenModel', () => { restore(); }); it('generateImages makes a request to predict with generation config and safety settings', async () => { - const imagenModel = new ImagenModel(fakeVertexAI, { + const imagenModel = new ImagenModel(fakeGenAI, { model: 'my-model', generationConfig: { negativePrompt: 'do not hallucinate', @@ -146,15 +148,15 @@ describe('ImagenModel', () => { json: mockResponse.json } as Response); - const imagenModel = new ImagenModel(fakeVertexAI, { + const imagenModel = new ImagenModel(fakeGenAI, { model: 'my-model' }); try { await imagenModel.generateImages('some inappropriate prompt.'); } catch (e) { - expect((e as VertexAIError).code).to.equal(VertexAIErrorCode.FETCH_ERROR); - expect((e as VertexAIError).message).to.include('400'); - expect((e as VertexAIError).message).to.include( + expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); + expect((e as GenAIError).message).to.include('400'); + expect((e as GenAIError).message).to.include( "Image generation failed with the following error: The prompt could not be submitted. This prompt contains sensitive words that violate Google's Responsible AI practices. Try rephrasing the prompt. If you think this was an error, send feedback." ); } finally { diff --git a/packages/vertexai/src/models/imagen-model.ts b/packages/vertexai/src/models/imagen-model.ts index 04514ef6ffd..9a36b3f6954 100644 --- a/packages/vertexai/src/models/imagen-model.ts +++ b/packages/vertexai/src/models/imagen-model.ts @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -import { VertexAI } from '../public-types'; +import { GenAI } from '../public-types'; import { Task, makeRequest } from '../requests/request'; import { createPredictRequestBody } from '../requests/request-helpers'; import { handlePredictResponse } from '../requests/response-helpers'; @@ -28,7 +28,7 @@ import { ImagenGenerationResponse, ImagenSafetySettings } from '../types'; -import { VertexAIModel } from './vertexai-model'; +import { GenAIModel } from './genai-model'; /** * Class for Imagen model APIs. @@ -38,7 +38,7 @@ import { VertexAIModel } from './vertexai-model'; * @example * ```javascript * const imagen = new ImagenModel( - * vertexAI, + * genAI, * { * model: 'imagen-3.0-generate-002' * } @@ -52,7 +52,7 @@ import { VertexAIModel } from './vertexai-model'; * * @beta */ -export class ImagenModel extends VertexAIModel { +export class ImagenModel extends GenAIModel { /** * The Imagen generation configuration. */ @@ -65,7 +65,7 @@ export class ImagenModel extends VertexAIModel { /** * Constructs a new instance of the {@link ImagenModel} class. * - * @param vertexAI - An instance of the Vertex AI in Firebase SDK. + * @param genAI - A {@link GenAI} instance. * @param modelParams - Parameters to use when making requests to Imagen. * @param requestOptions - Additional options to use when making requests. * @@ -73,12 +73,12 @@ export class ImagenModel extends VertexAIModel { * Firebase config. */ constructor( - vertexAI: VertexAI, + genAI: GenAI, modelParams: ImagenModelParams, public requestOptions?: RequestOptions ) { const { model, generationConfig, safetySettings } = modelParams; - super(vertexAI, model); + super(genAI, model); this.generationConfig = generationConfig; this.safetySettings = safetySettings; } diff --git a/packages/vertexai/src/models/index.ts b/packages/vertexai/src/models/index.ts index aec06be26fd..3034aadedc8 100644 --- a/packages/vertexai/src/models/index.ts +++ b/packages/vertexai/src/models/index.ts @@ -15,6 +15,6 @@ * limitations under the License. */ -export * from './vertexai-model'; +export * from './genai-model'; export * from './generative-model'; export * from './imagen-model'; diff --git a/packages/vertexai/src/public-types.ts b/packages/vertexai/src/public-types.ts index fbc5d51084d..4dd7b3a53ba 100644 --- a/packages/vertexai/src/public-types.ts +++ b/packages/vertexai/src/public-types.ts @@ -21,20 +21,136 @@ export * from './types'; /** * An instance of the Vertex AI in Firebase SDK. + * + * For more information, refer to the documentation for the new {@link GenAI}. + * + * @public + */ +export type VertexAI = GenAI; + +/** + * Options when initializing the Vertex AI in Firebase SDK. + * + * @public + */ +export interface VertexAIOptions { + location?: string; +} + +/** + * An instance of the Firebase GenAI SDK. + * + * Do not create this instance directly. Instead, use {@link getGenAI | getGenAI()}. + * * @public */ -export interface VertexAI { +export interface GenAI { /** - * The {@link @firebase/app#FirebaseApp} this {@link VertexAI} instance is associated with. + * The {@link @firebase/app#FirebaseApp} this {@link GenAI} instance is associated with. */ app: FirebaseApp; + /** + * A {@link Backend} instance that specifies the backend configuration. + */ + backend: Backend; + /** + * The location configured for this GenAI service instance, relevant for Vertex AI backends. + * + * @deprecated use `GenAI.backend.location` instead. + */ location: string; } /** - * Options when initializing the Vertex AI in Firebase SDK. 
+ * Union type representing the backend configuration for the GenAI service. + * This can be either a {@link GoogleAIBackend} or a + * {@link VertexAIBackend} configuration object. + * + * Create instances using {@link googleAIBackend | googleAIBackend() } or + * {@link vertexAIBackend | vertexAIBackend() }. + * * @public */ -export interface VertexAIOptions { - location?: string; +export type Backend = GoogleAIBackend | VertexAIBackend; + +/** + * Represents the configuration object for the Google AI backend. + * Use this with {@link GenAIOptions} when initializing the service with + * {@link getGenAI | getGenAI()}. + * Create an instance using {@link googleAIBackend | googleAIBackend()}. + * + * @public + */ +export type GoogleAIBackend = { + /** + * Specifies the backend type as Google AI. + */ + backendType: typeof BackendType.GOOGLE_AI; +}; + +/** + * Represents the configuration object for the Vertex AI backend. + * Use this with {@link GenAIOptions} when initializing the server with + * {@link getGenAI | getGenAI() }. + * Create an instance using {@link vertexAIBackend | vertexAIBackend() } function. + * + * @public + */ +export type VertexAIBackend = { + /** + * Specifies the backend type as Vertex AI. + */ + backendType: typeof BackendType.VERTEX_AI; + /** + * The region identifier, defaulting to `us-central1`; see {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + location: string; +}; + +/** + * An enum-like object containing constants that represent the supported backends + * for the Firebase GenAI SDK. + * + * These values are assigned to the `backendType` property within the specific backend + * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify + * which service to target. + * + * @public + */ +export const BackendType = { + /** + * Identifies the Vertex AI backend service provided through Google Cloud. + * Use this constant when creating a {@link VertexAIBackend} configuration. + */ + VERTEX_AI: 'VERTEX_AI', + + /** + * Identifies the Google AI backend service (often associated with models available + * through Google AI Studio, like Gemini). + * Use this constant when creating a {@link GoogleAIBackend} configuration. + */ + GOOGLE_AI: 'GOOGLE_AI' +} as const; // Using 'as const' makes the string values literal types + +/** + * Type alias representing valid backend types. + * It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. + * + * @public + */ +export type BackendType = (typeof BackendType)[keyof typeof BackendType]; + +/** + * Options interface for initializing the GenAI service using {@link getGenAI | getGenAI()}. + * + * @public + */ +export interface GenAIOptions { + /** + * The backend configuration to use for the GenAI service instance. + * Use {@link googleAIBackend | googleAIBackend()} or + * {@link vertexAIBackend | vertexAIBackend() } to create this configuration. 
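To tie the backend types in this section together, a minimal usage sketch. It assumes `getGenAI` accepts a `FirebaseApp` plus the `GenAIOptions` described here (mirroring the existing `getVertexAI` shape) and that these symbols are exported from the `firebase/vertexai` entry point; both are assumptions for illustration, not part of this patch.

```ts
import { initializeApp } from 'firebase/app';
import {
  getGenAI,
  getGenerativeModel,
  googleAIBackend,
  vertexAIBackend
} from 'firebase/vertexai';

const app = initializeApp({ /* Firebase config */ });

// Target the Google AI backend.
const googleAI = getGenAI(app, { backend: googleAIBackend() });

// Or target the Vertex AI backend in a specific location.
const vertexAI = getGenAI(app, { backend: vertexAIBackend('us-central1') });

// Models are then obtained the same way for either backend.
const model = getGenerativeModel(googleAI, { model: 'my-model' });
```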
+ */ + backend: Backend; } diff --git a/packages/vertexai/src/requests/request-helpers.ts b/packages/vertexai/src/requests/request-helpers.ts index fd2cd04e0fd..411e61c991b 100644 --- a/packages/vertexai/src/requests/request-helpers.ts +++ b/packages/vertexai/src/requests/request-helpers.ts @@ -19,9 +19,9 @@ import { Content, GenerateContentRequest, Part, - VertexAIErrorCode + GenAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { GenAIError } from '../errors'; import { ImagenGenerationParams, PredictRequestBody } from '../types/internal'; export function formatSystemInstruction( @@ -87,15 +87,15 @@ function assignRoleToPartsAndValidateSendMessageRequest( } if (hasUserContent && hasFunctionContent) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.' ); } if (!hasUserContent && !hasFunctionContent) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.' ); } diff --git a/packages/vertexai/src/requests/request.test.ts b/packages/vertexai/src/requests/request.test.ts index cd39a0f8ae5..62c0a230944 100644 --- a/packages/vertexai/src/requests/request.test.ts +++ b/packages/vertexai/src/requests/request.test.ts @@ -22,9 +22,10 @@ import chaiAsPromised from 'chai-as-promised'; import { RequestUrl, Task, getHeaders, makeRequest } from './request'; import { ApiSettings } from '../types/internal'; import { DEFAULT_API_VERSION } from '../constants'; -import { VertexAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { GenAIErrorCode } from '../types'; +import { GenAIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; +import { vertexAIBackend } from '../api'; use(sinonChai); use(chaiAsPromised); @@ -33,7 +34,8 @@ const fakeApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', - location: 'us-central1' + location: 'us-central1', + backend: vertexAIBackend() }; describe('request methods', () => { @@ -106,6 +108,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', + backend: vertexAIBackend(), getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) }; @@ -132,6 +135,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', + backend: vertexAIBackend(), automaticDataCollectionEnabled: true, getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) @@ -156,6 +160,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', + backend: vertexAIBackend(), automaticDataCollectionEnabled: false, getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) @@ -182,7 +187,8 @@ describe('request methods', () => { apiKey: 'key', project: 'myproject', appId: 'my-appid', - location: 'moon' + location: 'moon', + backend: vertexAIBackend() }, true, {} @@ -216,6 +222,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', + backend: vertexAIBackend(), getAppCheckToken: () => 
Promise.resolve({ token: 'dummytoken', error: Error('oops') }) }, @@ -242,7 +249,8 @@ describe('request methods', () => { apiKey: 'key', project: 'myproject', appId: 'my-appid', - location: 'moon' + location: 'moon', + backend: vertexAIBackend() }, true, {} @@ -302,14 +310,12 @@ describe('request methods', () => { } ); } catch (e) { - expect((e as VertexAIError).code).to.equal( - VertexAIErrorCode.FETCH_ERROR - ); - expect((e as VertexAIError).customErrorData?.status).to.equal(500); - expect((e as VertexAIError).customErrorData?.statusText).to.equal( + expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); + expect((e as GenAIError).customErrorData?.status).to.equal(500); + expect((e as GenAIError).customErrorData?.statusText).to.equal( 'AbortError' ); - expect((e as VertexAIError).message).to.include('500 AbortError'); + expect((e as GenAIError).message).to.include('500 AbortError'); } expect(fetchStub).to.be.calledOnce; @@ -329,14 +335,12 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as VertexAIError).code).to.equal( - VertexAIErrorCode.FETCH_ERROR - ); - expect((e as VertexAIError).customErrorData?.status).to.equal(500); - expect((e as VertexAIError).customErrorData?.statusText).to.equal( + expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); + expect((e as GenAIError).customErrorData?.status).to.equal(500); + expect((e as GenAIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as VertexAIError).message).to.include('500 Server Error'); + expect((e as GenAIError).message).to.include('500 Server Error'); } expect(fetchStub).to.be.calledOnce; }); @@ -356,15 +360,13 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as VertexAIError).code).to.equal( - VertexAIErrorCode.FETCH_ERROR - ); - expect((e as VertexAIError).customErrorData?.status).to.equal(500); - expect((e as VertexAIError).customErrorData?.statusText).to.equal( + expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); + expect((e as GenAIError).customErrorData?.status).to.equal(500); + expect((e as GenAIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as VertexAIError).message).to.include('500 Server Error'); - expect((e as VertexAIError).message).to.include('extra info'); + expect((e as GenAIError).message).to.include('500 Server Error'); + expect((e as GenAIError).message).to.include('extra info'); } expect(fetchStub).to.be.calledOnce; }); @@ -396,16 +398,14 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as VertexAIError).code).to.equal( - VertexAIErrorCode.FETCH_ERROR - ); - expect((e as VertexAIError).customErrorData?.status).to.equal(500); - expect((e as VertexAIError).customErrorData?.statusText).to.equal( + expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); + expect((e as GenAIError).customErrorData?.status).to.equal(500); + expect((e as GenAIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as VertexAIError).message).to.include('500 Server Error'); - expect((e as VertexAIError).message).to.include('extra info'); - expect((e as VertexAIError).message).to.include( + expect((e as GenAIError).message).to.include('500 Server Error'); + expect((e as GenAIError).message).to.include('extra info'); + expect((e as GenAIError).message).to.include( 'generic::invalid_argument' ); } @@ -429,11 +429,9 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as VertexAIError).code).to.equal( - 
VertexAIErrorCode.API_NOT_ENABLED - ); - expect((e as VertexAIError).message).to.include('my-project'); - expect((e as VertexAIError).message).to.include('googleapis.com'); + expect((e as GenAIError).code).to.equal(GenAIErrorCode.API_NOT_ENABLED); + expect((e as GenAIError).message).to.include('my-project'); + expect((e as GenAIError).message).to.include('googleapis.com'); } expect(fetchStub).to.be.calledOnce; }); diff --git a/packages/vertexai/src/requests/request.ts b/packages/vertexai/src/requests/request.ts index 47e4c6ab446..64e299fc422 100644 --- a/packages/vertexai/src/requests/request.ts +++ b/packages/vertexai/src/requests/request.ts @@ -15,8 +15,8 @@ * limitations under the License. */ -import { ErrorDetails, RequestOptions, VertexAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { ErrorDetails, RequestOptions, GenAIErrorCode } from '../types'; +import { GenAIError } from '../errors'; import { ApiSettings } from '../types/internal'; import { DEFAULT_API_VERSION, @@ -26,6 +26,7 @@ import { PACKAGE_VERSION } from '../constants'; import { logger } from '../logger'; +import { BackendType } from '../public-types'; export enum Task { GENERATE_CONTENT = 'generateContent', @@ -43,29 +44,35 @@ export class RequestUrl { public requestOptions?: RequestOptions ) {} toString(): string { - // TODO: allow user-set option if that feature becomes available - const apiVersion = DEFAULT_API_VERSION; - const baseUrl = this.requestOptions?.baseUrl || DEFAULT_BASE_URL; - let url = `${baseUrl}/${apiVersion}`; - url += `/projects/${this.apiSettings.project}`; - url += `/locations/${this.apiSettings.location}`; - url += `/${this.model}`; - url += `:${this.task}`; - if (this.stream) { - url += '?alt=sse'; + const url = new URL(this.baseUrl); // Throws if the URL is invalid + url.pathname = `/${this.apiVersion}/${this.modelPath}:${this.task}`; + url.search = this.queryParams.toString(); + return url.toString(); + } + + private get baseUrl(): string { + return this.requestOptions?.baseUrl || DEFAULT_BASE_URL; + } + + private get apiVersion(): string { + return DEFAULT_API_VERSION; // TODO: allow user-set options if that feature becomes available + } + + private get modelPath(): string { + if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + return `projects/${this.apiSettings.project}/${this.model}`; + } else { + return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; } - return url; } - /** - * If the model needs to be passed to the backend, it needs to - * include project and location path. - */ - get fullModelString(): string { - let modelString = `projects/${this.apiSettings.project}`; - modelString += `/locations/${this.apiSettings.location}`; - modelString += `/${this.model}`; - return modelString; + private get queryParams(): URLSearchParams { + const params = new URLSearchParams(); + if (this.stream) { + params.set('alt', 'sse'); + } + + return params; } } @@ -184,8 +191,8 @@ export async function makeRequest( ) ) ) { - throw new VertexAIError( - VertexAIErrorCode.API_NOT_ENABLED, + throw new GenAIError( + GenAIErrorCode.API_NOT_ENABLED, `The Vertex AI in Firebase SDK requires the Vertex AI in Firebase ` + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + `Firebase project. 
Enable this API by visiting the Firebase Console ` + @@ -200,8 +207,8 @@ export async function makeRequest( } ); } - throw new VertexAIError( - VertexAIErrorCode.FETCH_ERROR, + throw new GenAIError( + GenAIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { status: response.status, @@ -213,12 +220,12 @@ export async function makeRequest( } catch (e) { let err = e as Error; if ( - (e as VertexAIError).code !== VertexAIErrorCode.FETCH_ERROR && - (e as VertexAIError).code !== VertexAIErrorCode.API_NOT_ENABLED && + (e as GenAIError).code !== GenAIErrorCode.FETCH_ERROR && + (e as GenAIError).code !== GenAIErrorCode.API_NOT_ENABLED && e instanceof Error ) { - err = new VertexAIError( - VertexAIErrorCode.ERROR, + err = new GenAIError( + GenAIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}` ); err.stack = e.stack; diff --git a/packages/vertexai/src/requests/response-helpers.ts b/packages/vertexai/src/requests/response-helpers.ts index 6d0e3bf2a0a..db79abaf937 100644 --- a/packages/vertexai/src/requests/response-helpers.ts +++ b/packages/vertexai/src/requests/response-helpers.ts @@ -23,9 +23,9 @@ import { GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, - VertexAIErrorCode + GenAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { GenAIError } from '../errors'; import { logger } from '../logger'; import { ImagenResponseInternal } from '../types/internal'; @@ -67,8 +67,8 @@ export function addHelpers( ); } if (hadBadFinishReason(response.candidates[0])) { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage( response )}. Response body stored in error.response`, @@ -79,8 +79,8 @@ export function addHelpers( } return getText(response); } else if (response.promptFeedback) { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, { response @@ -99,8 +99,8 @@ export function addHelpers( ); } if (hadBadFinishReason(response.candidates[0])) { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage( response )}. Response body stored in error.response`, @@ -111,8 +111,8 @@ export function addHelpers( } return getFunctionCalls(response); } else if (response.promptFeedback) { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { response @@ -217,8 +217,8 @@ export async function handlePredictResponse< // The backend should always send a non-empty array of predictions if the response was successful. if (!responseJson.predictions || responseJson.predictions?.length === 0) { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.' 
); } @@ -237,8 +237,8 @@ export async function handlePredictResponse< gcsURI: prediction.gcsUri } as T); } else { - throw new VertexAIError( - VertexAIErrorCode.RESPONSE_ERROR, + throw new GenAIError( + GenAIErrorCode.RESPONSE_ERROR, `Predictions array in response has missing properties. Response: ${JSON.stringify( responseJson )}` diff --git a/packages/vertexai/src/requests/schema-builder.test.ts b/packages/vertexai/src/requests/schema-builder.test.ts index b95acaae9f1..8cef35ade8d 100644 --- a/packages/vertexai/src/requests/schema-builder.test.ts +++ b/packages/vertexai/src/requests/schema-builder.test.ts @@ -18,7 +18,7 @@ import { expect, use } from 'chai'; import sinonChai from 'sinon-chai'; import { Schema } from './schema-builder'; -import { VertexAIErrorCode } from '../types'; +import { GenAIErrorCode } from '../types'; use(sinonChai); @@ -243,7 +243,7 @@ describe('Schema builder', () => { }, optionalProperties: ['cat'] }); - expect(() => schema.toJSON()).to.throw(VertexAIErrorCode.INVALID_SCHEMA); + expect(() => schema.toJSON()).to.throw(GenAIErrorCode.INVALID_SCHEMA); }); }); diff --git a/packages/vertexai/src/requests/schema-builder.ts b/packages/vertexai/src/requests/schema-builder.ts index 3d219d58b13..a729b0a6ebf 100644 --- a/packages/vertexai/src/requests/schema-builder.ts +++ b/packages/vertexai/src/requests/schema-builder.ts @@ -15,8 +15,8 @@ * limitations under the License. */ -import { VertexAIError } from '../errors'; -import { VertexAIErrorCode } from '../types'; +import { GenAIError } from '../errors'; +import { GenAIErrorCode } from '../types'; import { SchemaInterface, SchemaType, @@ -266,8 +266,8 @@ export class ObjectSchema extends Schema { if (this.optionalProperties) { for (const propertyKey of this.optionalProperties) { if (!this.properties.hasOwnProperty(propertyKey)) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_SCHEMA, + throw new GenAIError( + GenAIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.` ); } diff --git a/packages/vertexai/src/requests/stream-reader.test.ts b/packages/vertexai/src/requests/stream-reader.test.ts index bf959276a93..2e7041f064a 100644 --- a/packages/vertexai/src/requests/stream-reader.test.ts +++ b/packages/vertexai/src/requests/stream-reader.test.ts @@ -34,9 +34,19 @@ import { HarmCategory, HarmProbability, SafetyRating, - VertexAIErrorCode + GenAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { GenAIError } from '../errors'; +import { ApiSettings } from '../types/internal'; +import { vertexAIBackend } from '../api'; + +const fakeApiSettings: ApiSettings = { + apiKey: 'key', + project: 'my-project', + appId: 'my-appid', + location: 'us-central1', + backend: vertexAIBackend() +}; use(sinonChai); @@ -75,7 +85,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-basic-reply-short.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); for await (const response of result.stream) { expect(response.text()).to.not.be.empty; } @@ -87,7 +97,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-basic-reply-long.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); for await (const response of result.stream) { expect(response.text()).to.not.be.empty; } @@ -101,7 +111,7 @@ describe('processStream', () => { 'streaming-success-basic-reply-long.txt', 
1e6 ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); for await (const response of result.stream) { expect(response.text()).to.not.be.empty; } @@ -114,7 +124,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-utf8.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); for await (const response of result.stream) { expect(response.text()).to.not.be.empty; } @@ -127,7 +137,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-function-call-short.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); for await (const response of result.stream) { expect(response.text()).to.be.empty; expect(response.functionCalls()).to.be.deep.equal([ @@ -151,7 +161,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-failure-finish-reason-safety.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.candidates?.[0].finishReason).to.equal('SAFETY'); expect(aggregatedResponse.text).to.throw('SAFETY'); @@ -164,7 +174,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-failure-prompt-blocked-safety.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text).to.throw('SAFETY'); expect(aggregatedResponse.promptFeedback?.blockReason).to.equal('SAFETY'); @@ -177,7 +187,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-failure-empty-content.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text()).to.equal(''); for await (const response of result.stream) { @@ -189,7 +199,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-unknown-safety-enum.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text()).to.include('Cats'); for await (const response of result.stream) { @@ -201,7 +211,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-failure-recitation-no-content.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text).to.throw('RECITATION'); expect(aggregatedResponse.candidates?.[0].content.parts[0].text).to.include( @@ -220,7 +230,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-citations.txt' ); - const result = processStream(fakeResponse as Response); + const result = processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text()).to.include('Quantum mechanics is'); expect( @@ -240,7 +250,7 @@ describe('processStream', () => { 'vertexAI', 'streaming-success-empty-text-part.txt' ); - const result = processStream(fakeResponse as Response); + const result = 
processStream(fakeResponse as Response, fakeApiSettings); const aggregatedResponse = await result.response; expect(aggregatedResponse.text()).to.equal('1'); expect(aggregatedResponse.candidates?.length).to.equal(1); @@ -472,10 +482,8 @@ describe('aggregateResponses', () => { try { aggregateResponses(responsesToAggregate); } catch (e) { - expect((e as VertexAIError).code).includes( - VertexAIErrorCode.INVALID_CONTENT - ); - expect((e as VertexAIError).message).to.include( + expect((e as GenAIError).code).includes(GenAIErrorCode.INVALID_CONTENT); + expect((e as GenAIError).message).to.include( 'Part should have at least one property, but there are none. This is likely caused ' + 'by a malformed response from the backend.' ); diff --git a/packages/vertexai/src/requests/stream-reader.ts b/packages/vertexai/src/requests/stream-reader.ts index 5c419d114e0..db1f99d7b27 100644 --- a/packages/vertexai/src/requests/stream-reader.ts +++ b/packages/vertexai/src/requests/stream-reader.ts @@ -21,10 +21,14 @@ import { GenerateContentResponse, GenerateContentStreamResult, Part, - VertexAIErrorCode + GenAIErrorCode } from '../types'; -import { VertexAIError } from '../errors'; +import { GenAIError } from '../errors'; import { createEnhancedContentResponse } from './response-helpers'; +import * as GoogleAIMapper from '../googleAIMappers'; +import { GoogleAIGenerateContentResponse } from '../types/googleAI'; +import { ApiSettings } from '../types/internal'; +import { BackendType } from '../public-types'; const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; @@ -36,7 +40,10 @@ const responseLineRE = /^data\: (.*)(?:\n\n|\r\r|\r\n\r\n)/; * * @param response - Response from a fetch call */ -export function processStream(response: Response): GenerateContentStreamResult { +export function processStream( + response: Response, + apiSettings: ApiSettings +): GenerateContentStreamResult { const inputStream = response.body!.pipeThrough( new TextDecoderStream('utf8', { fatal: true }) ); @@ -44,23 +51,27 @@ export function processStream(response: Response): GenerateContentStreamResult { getResponseStream(inputStream); const [stream1, stream2] = responseStream.tee(); return { - stream: generateResponseSequence(stream1), - response: getResponsePromise(stream2) + stream: generateResponseSequence(stream1, apiSettings), + response: getResponsePromise(stream2, apiSettings) }; } async function getResponsePromise( - stream: ReadableStream + stream: ReadableStream, + apiSettings: ApiSettings ): Promise { const allResponses: GenerateContentResponse[] = []; const reader = stream.getReader(); while (true) { const { done, value } = await reader.read(); if (done) { - const enhancedResponse = createEnhancedContentResponse( - aggregateResponses(allResponses) - ); - return enhancedResponse; + let generateContentResponse = aggregateResponses(allResponses); + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + generateContentResponse = GoogleAIMapper.mapGenerateContentResponse( + generateContentResponse as GoogleAIGenerateContentResponse + ); + } + return createEnhancedContentResponse(generateContentResponse); } allResponses.push(value); @@ -68,7 +79,8 @@ async function getResponsePromise( } async function* generateResponseSequence( - stream: ReadableStream + stream: ReadableStream, + apiSettings: ApiSettings ): AsyncGenerator { const reader = stream.getReader(); while (true) { @@ -77,7 +89,17 @@ async function* generateResponseSequence( break; } - const enhancedResponse = createEnhancedContentResponse(value); + let 
enhancedResponse: EnhancedGenerateContentResponse; + if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + enhancedResponse = createEnhancedContentResponse( + GoogleAIMapper.mapGenerateContentResponse( + value as GoogleAIGenerateContentResponse + ) + ); + } else { + enhancedResponse = createEnhancedContentResponse(value); + } + yield enhancedResponse; } } @@ -100,8 +122,8 @@ export function getResponseStream( if (done) { if (currentText.trim()) { controller.error( - new VertexAIError( - VertexAIErrorCode.PARSE_FAILED, + new GenAIError( + GenAIErrorCode.PARSE_FAILED, 'Failed to parse stream' ) ); @@ -119,8 +141,8 @@ export function getResponseStream( parsedResponse = JSON.parse(match[1]); } catch (e) { controller.error( - new VertexAIError( - VertexAIErrorCode.PARSE_FAILED, + new GenAIError( + GenAIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}` ) ); @@ -198,8 +220,8 @@ export function aggregateResponses( newPart.functionCall = part.functionCall; } if (Object.keys(newPart).length === 0) { - throw new VertexAIError( - VertexAIErrorCode.INVALID_CONTENT, + throw new GenAIError( + GenAIErrorCode.INVALID_CONTENT, 'Part should have at least one property, but there are none. This is likely caused ' + 'by a malformed response from the backend.' ); diff --git a/packages/vertexai/src/service.test.ts b/packages/vertexai/src/service.test.ts index d3487e9bdd2..e76037971e5 100644 --- a/packages/vertexai/src/service.test.ts +++ b/packages/vertexai/src/service.test.ts @@ -14,8 +14,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +import { vertexAIBackend } from './api'; import { DEFAULT_LOCATION } from './constants'; -import { VertexAIService } from './service'; +import { GenAIService } from './service'; import { expect } from 'chai'; const fakeApp = { @@ -27,18 +28,19 @@ const fakeApp = { } }; -describe('VertexAIService', () => { +describe('GenAIService', () => { + // TODO (dlarocque): move some of these tests to helpers.test.ts it('uses default location if not specified', () => { - const vertexAI = new VertexAIService(fakeApp); - expect(vertexAI.location).to.equal(DEFAULT_LOCATION); + const genAI = new GenAIService(fakeApp, vertexAIBackend()); + expect(genAI.location).to.equal(DEFAULT_LOCATION); }); it('uses custom location if specified', () => { - const vertexAI = new VertexAIService( + const genAI = new GenAIService( fakeApp, + vertexAIBackend('somewhere'), /* authProvider */ undefined, - /* appCheckProvider */ undefined, - { location: 'somewhere' } + /* appCheckProvider */ undefined ); - expect(vertexAI.location).to.equal('somewhere'); + expect(genAI.location).to.equal('somewhere'); }); }); diff --git a/packages/vertexai/src/service.ts b/packages/vertexai/src/service.ts index 05b2d559e58..d793ef5acfc 100644 --- a/packages/vertexai/src/service.ts +++ b/packages/vertexai/src/service.ts @@ -16,7 +16,7 @@ */ import { FirebaseApp, _FirebaseService } from '@firebase/app'; -import { VertexAI, VertexAIOptions } from './public-types'; +import { Backend, GenAI } from './public-types'; import { AppCheckInternalComponentName, FirebaseAppCheckInternal @@ -26,24 +26,28 @@ import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types'; -import { DEFAULT_LOCATION } from './constants'; -export class VertexAIService implements VertexAI, _FirebaseService { +export class GenAIService implements GenAI, _FirebaseService { auth: FirebaseAuthInternal | null; appCheck: FirebaseAppCheckInternal | null; 
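To make the new plumbing concrete, the sketch below drives the updated `processStream(response, apiSettings)` signature and consumes both the per-chunk stream and the aggregated response. Only the `processStream` signature, the `ApiSettings`/`BackendType` names, and the Google AI mapping behavior come from the patch; the settings literal, the `consume` helper, and the logging are illustrative assumptions.

```typescript
import { processStream } from './requests/stream-reader';
import { BackendType } from './public-types';
import { ApiSettings } from './types/internal';

// Hypothetical settings; in the SDK these are derived from the AI service instance.
const apiSettings = {
  apiKey: 'fake-api-key',
  project: 'fake-project',
  appId: 'fake-app-id',
  location: '',
  backend: { backendType: BackendType.GOOGLE_AI }
} as ApiSettings;

async function consume(response: Response): Promise<void> {
  // processStream now receives ApiSettings so it can decide whether each chunk
  // (and the aggregated result) must first be mapped from the Google AI shape.
  const { stream, response: aggregated } = processStream(response, apiSettings);
  for await (const chunk of stream) {
    console.log(chunk.text()); // enhanced response for each streamed chunk
  }
  console.log((await aggregated).text()); // enhanced response for the full reply
}
```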
- location: string; + location: string; // This is here for backwards-compatibility constructor( public app: FirebaseApp, + public backend: Backend, authProvider?: Provider, - appCheckProvider?: Provider, - public options?: VertexAIOptions + appCheckProvider?: Provider ) { const appCheck = appCheckProvider?.getImmediate({ optional: true }); const auth = authProvider?.getImmediate({ optional: true }); this.auth = auth || null; this.appCheck = appCheck || null; - this.location = this.options?.location || DEFAULT_LOCATION; + + if (backend.backendType === 'VERTEX_AI') { + this.location = backend.location; + } else { + this.location = ''; + } } _delete(): Promise { diff --git a/packages/vertexai/src/types/enums.ts b/packages/vertexai/src/types/enums.ts index a9481d40f5f..ffa072f33d7 100644 --- a/packages/vertexai/src/types/enums.ts +++ b/packages/vertexai/src/types/enums.ts @@ -62,11 +62,13 @@ export enum HarmBlockThreshold { } /** + * This property is not supported in Google AI. + * * @public */ export enum HarmBlockMethod { /** - * The harm block method uses both probability and severity scores. + * The harm block method uses both probability and severity scores. */ SEVERITY = 'SEVERITY', /** @@ -118,7 +120,12 @@ export enum HarmSeverity { /** * High level of harm severity. */ - HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH' + HARM_SEVERITY_HIGH = 'HARM_SEVERITY_HIGH', + /** + * Harm severity is not supported. + * The GoogleAI backend does not support `HarmSeverity`, so this value is used as a fallback. + */ + HARM_SEVERITY_UNSUPPORTED = 'HARM_SEVERITY_UNSUPPORTED' } /** diff --git a/packages/vertexai/src/types/error.ts b/packages/vertexai/src/types/error.ts index b1f075101a6..c29bbe95284 100644 --- a/packages/vertexai/src/types/error.ts +++ b/packages/vertexai/src/types/error.ts @@ -58,11 +58,11 @@ export interface CustomErrorData { } /** - * Standardized error codes that {@link VertexAIError} can have. + * Standardized error codes that {@link GenAIError} can have. * * @public */ -export const enum VertexAIErrorCode { +export const enum GenAIErrorCode { /** A generic error occurred. */ ERROR = 'error', @@ -97,5 +97,8 @@ export const enum VertexAIErrorCode { NO_PROJECT_ID = 'no-project-id', /** An error occurred while parsing. */ - PARSE_FAILED = 'parse-failed' + PARSE_FAILED = 'parse-failed', + + /** An error occured due an attempt to use an unsupported feature. */ + UNSUPPORTED = 'unsupported' } diff --git a/packages/vertexai/src/types/googleAI/index.ts b/packages/vertexai/src/types/googleAI/index.ts new file mode 100644 index 00000000000..546c64f13b1 --- /dev/null +++ b/packages/vertexai/src/types/googleAI/index.ts @@ -0,0 +1,19 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export * from './requests'; +export * from './responses'; diff --git a/packages/vertexai/src/types/googleAI/requests.ts b/packages/vertexai/src/types/googleAI/requests.ts new file mode 100644 index 00000000000..94dfb4c4241 --- /dev/null +++ b/packages/vertexai/src/types/googleAI/requests.ts @@ -0,0 +1,29 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Content, Part } from '../content'; +import { GenerationConfig, Tool } from '../requests'; + +export interface GoogleAICountTokensRequest { + generateContentRequest: { + model: string; // 'models/model-name' + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} diff --git a/packages/vertexai/src/types/googleAI/responses.ts b/packages/vertexai/src/types/googleAI/responses.ts new file mode 100644 index 00000000000..702464cecc7 --- /dev/null +++ b/packages/vertexai/src/types/googleAI/responses.ts @@ -0,0 +1,46 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Content } from '../content'; +import { FinishReason } from '../enums'; +import { + Citation, + GroundingMetadata, + PromptFeedback, + SafetyRating, + UsageMetadata +} from '../responses'; + +export interface GoogleAIGenerateContentResponse { + candidates?: GoogleAIGenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} + +export interface GoogleAIGenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: GoogleAICitationMetadata; + groundingMetadata?: GroundingMetadata; +} + +export interface GoogleAICitationMetadata { + citationSources: Citation[]; // Maps to `citations` +} diff --git a/packages/vertexai/src/types/imagen/requests.ts b/packages/vertexai/src/types/imagen/requests.ts index 70ae182238e..d5bc3c31223 100644 --- a/packages/vertexai/src/types/imagen/requests.ts +++ b/packages/vertexai/src/types/imagen/requests.ts @@ -88,6 +88,9 @@ export interface ImagenGenerationConfig { * * For Imagen 3 models, the default value is `true`; see the addWatermark * documentation for more details. + * + * @remarks + * In Google AI, the default value is true, and it cannot be turned off. 
*/ addWatermark?: boolean; } diff --git a/packages/vertexai/src/types/internal.ts b/packages/vertexai/src/types/internal.ts index a3476afd028..4303d4c07d8 100644 --- a/packages/vertexai/src/types/internal.ts +++ b/packages/vertexai/src/types/internal.ts @@ -17,6 +17,7 @@ import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; +import { Backend } from '../public-types'; export * from './imagen/internal'; @@ -24,8 +25,14 @@ export interface ApiSettings { apiKey: string; project: string; appId: string; - location: string; automaticDataCollectionEnabled?: boolean; + /** + * @deprecated + */ + location: string; + backend: Backend; getAuthToken?: () => Promise; getAppCheckToken?: () => Promise; } + +export type InstanceIdentifier = Backend; diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts index c15258b06d0..f18a0ae0c48 100644 --- a/packages/vertexai/src/types/requests.ts +++ b/packages/vertexai/src/types/requests.ts @@ -63,6 +63,11 @@ export interface GenerateContentRequest extends BaseParams { export interface SafetySetting { category: HarmCategory; threshold: HarmBlockThreshold; + /** + * This property is not supported in Google AI. + * If this is a property on a {@link GenerateContentRequest} to be sent, a {@link GenAIError} + * will be thrown. + */ method?: HarmBlockMethod; } diff --git a/packages/vertexai/src/types/responses.ts b/packages/vertexai/src/types/responses.ts index 7f68df1e679..844b0b2934b 100644 --- a/packages/vertexai/src/types/responses.ts +++ b/packages/vertexai/src/types/responses.ts @@ -108,7 +108,7 @@ export interface ModalityTokenCount { export interface PromptFeedback { blockReason?: BlockReason; safetyRatings: SafetyRating[]; - blockReasonMessage?: string; + blockReasonMessage?: string; // This will always be undefined when using Google AI. } /** @@ -142,8 +142,8 @@ export interface Citation { endIndex?: number; uri?: string; license?: string; - title?: string; - publicationDate?: Date; + title?: string; // This will always be undefined when using Google AI. + publicationDate?: Date; // This will always be undefined when using Google AI. } /** @@ -215,7 +215,7 @@ export interface SafetyRating { severity: HarmSeverity; probabilityScore: number; severityScore: number; - blocked: boolean; + blocked: boolean; // FIXME: This is only included when it's true. Either set a default of false, or make this optional. } /** @@ -231,7 +231,7 @@ export interface CountTokensResponse { * The total number of billable characters counted across all instances * from the request. */ - totalBillableCharacters?: number; + totalBillableCharacters?: number; // This will always be undefined when using Google AI. /** * The breakdown, by modality, of how many tokens are consumed by the prompt. */ diff --git a/packages/vertexai/src/types/schema.ts b/packages/vertexai/src/types/schema.ts index 5c23655be0e..c73caca9993 100644 --- a/packages/vertexai/src/types/schema.ts +++ b/packages/vertexai/src/types/schema.ts @@ -42,7 +42,10 @@ export enum SchemaType { * @public */ export interface SchemaShared { - /** Optional. The format of the property. */ + /** Optional. The format of the property. + * When using the Google AI backend, this must be either `'enum'` or `'date-time'`, otherwise + * requests will fail. + */ format?: string; /** Optional. The description of the property. 
*/ description?: string; diff --git a/packages/vertexai/test-utils/mock-response.ts b/packages/vertexai/test-utils/mock-response.ts index 5128ddabe74..c108704d615 100644 --- a/packages/vertexai/test-utils/mock-response.ts +++ b/packages/vertexai/test-utils/mock-response.ts @@ -72,6 +72,9 @@ export function getMockResponse( filename: string ): Partial { const mocksLookup = mockSetMaps[backendName]; + if (backendName === 'googleAI') { + console.log(Object.keys(mocksLookup)); + } if (!(filename in mocksLookup)) { throw Error(`${backendName} mock response file '${filename}' not found.`); } From 4e831d1bfd7804f3da7a918aa3055b97a0e6ffeb Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Thu, 17 Apr 2025 14:50:57 -0400 Subject: [PATCH 02/16] Convert `GenAI` to new Firebase AI naming --- common/api-review/vertexai.api.md | 131 ++++++++------- docs-devsite/_toc.yaml | 16 +- docs-devsite/index.md | 2 +- docs-devsite/vertexai.ai.md | 64 +++++++ ...exai.genaierror.md => vertexai.aierror.md} | 26 +-- ...exai.genaimodel.md => vertexai.aimodel.md} | 12 +- docs-devsite/vertexai.aioptions.md | 35 ++++ docs-devsite/vertexai.genai.md | 64 ------- docs-devsite/vertexai.genaioptions.md | 35 ---- docs-devsite/vertexai.generativemodel.md | 10 +- docs-devsite/vertexai.imagenmodel.md | 12 +- docs-devsite/vertexai.md | 158 +++++++++--------- docs-devsite/vertexai.modelparams.md | 2 +- docs-devsite/vertexai.requestoptions.md | 2 +- docs-devsite/vertexai.safetysetting.md | 4 +- docs-devsite/vertexai.vertexaioptions.md | 2 +- packages/firebase/package.json | 2 +- packages/vertexai/src/api.test.ts | 100 +++++------ packages/vertexai/src/api.ts | 82 ++++----- .../src/backwards-compatbility.test.ts | 38 ++--- packages/vertexai/src/constants.ts | 4 +- packages/vertexai/src/errors.ts | 22 +-- packages/vertexai/src/googleAIMappers.test.ts | 12 +- packages/vertexai/src/googleAIMappers.ts | 12 +- packages/vertexai/src/helpers.test.ts | 56 +++---- packages/vertexai/src/helpers.ts | 30 ++-- packages/vertexai/src/index.node.ts | 16 +- packages/vertexai/src/index.ts | 24 +-- .../src/methods/chat-session-helpers.ts | 28 ++-- .../src/methods/generate-content.test.ts | 8 +- .../vertexai/src/models/genai-model.test.ts | 44 ++--- packages/vertexai/src/models/genai-model.ts | 70 ++++---- .../src/models/generative-model.test.ts | 18 +- .../vertexai/src/models/generative-model.ts | 10 +- .../vertexai/src/models/imagen-model.test.ts | 20 +-- packages/vertexai/src/models/imagen-model.ts | 14 +- packages/vertexai/src/public-types.ts | 38 ++--- .../vertexai/src/requests/request-helpers.ts | 12 +- .../vertexai/src/requests/request.test.ts | 48 +++--- packages/vertexai/src/requests/request.ts | 22 +-- .../vertexai/src/requests/response-helpers.ts | 28 ++-- .../src/requests/schema-builder.test.ts | 4 +- .../vertexai/src/requests/schema-builder.ts | 8 +- .../src/requests/stream-reader.test.ts | 8 +- .../vertexai/src/requests/stream-reader.ts | 16 +- packages/vertexai/src/service.test.ts | 12 +- packages/vertexai/src/service.ts | 4 +- packages/vertexai/src/types/error.ts | 4 +- packages/vertexai/src/types/requests.ts | 2 +- 49 files changed, 697 insertions(+), 694 deletions(-) create mode 100644 docs-devsite/vertexai.ai.md rename docs-devsite/{vertexai.genaierror.md => vertexai.aierror.md} (52%) rename docs-devsite/{vertexai.genaimodel.md => vertexai.aimodel.md} (67%) create mode 100644 docs-devsite/vertexai.aioptions.md delete mode 100644 docs-devsite/vertexai.genai.md delete mode 100644 docs-devsite/vertexai.genaioptions.md diff --git 
a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index 66012360050..cc1a0c3269c 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -9,6 +9,62 @@ import { FirebaseApp } from '@firebase/app'; import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; import { FirebaseError } from '@firebase/util'; +// @public +export interface AI { + app: FirebaseApp; + backend: Backend; + // @deprecated + location: string; +} + +// @public +export class AIError extends FirebaseError { + constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); + // (undocumented) + readonly code: AIErrorCode; + // (undocumented) + readonly customErrorData?: CustomErrorData | undefined; +} + +// @public +const enum AIErrorCode { + API_NOT_ENABLED = "api-not-enabled", + ERROR = "error", + FETCH_ERROR = "fetch-error", + INVALID_CONTENT = "invalid-content", + INVALID_SCHEMA = "invalid-schema", + NO_API_KEY = "no-api-key", + NO_APP_ID = "no-app-id", + NO_MODEL = "no-model", + NO_PROJECT_ID = "no-project-id", + PARSE_FAILED = "parse-failed", + REQUEST_ERROR = "request-error", + RESPONSE_ERROR = "response-error", + UNSUPPORTED = "unsupported" +} + +export { AIErrorCode } + +export { AIErrorCode as VertexAIErrorCode } + +// @public +export abstract class AIModel { + // @internal + protected constructor(ai: AI, modelName: string); + // Warning: (ae-forgotten-export) The symbol "ApiSettings" needs to be exported by the entry point index.d.ts + // + // @internal (undocumented) + protected _apiSettings: ApiSettings; + readonly model: string; + // @internal + static normalizeModelName(modelName: string, backendType: BackendType): string; + } + +// @public +export interface AIOptions { + backend: Backend; +} + // @public export class ArraySchema extends Schema { constructor(schemaParams: SchemaParams, items: TypedSchema); @@ -53,7 +109,6 @@ export class BooleanSchema extends Schema { // @public export class ChatSession { - // Warning: (ae-forgotten-export) The symbol "ApiSettings" needs to be exported by the entry point index.d.ts constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); getHistory(): Promise; // (undocumented) @@ -251,60 +306,6 @@ export interface FunctionResponsePart { text?: never; } -// @public -export interface GenAI { - app: FirebaseApp; - backend: Backend; - // @deprecated - location: string; -} - -// @public -export class GenAIError extends FirebaseError { - constructor(code: GenAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); - // (undocumented) - readonly code: GenAIErrorCode; - // (undocumented) - readonly customErrorData?: CustomErrorData | undefined; -} - -// @public -const enum GenAIErrorCode { - API_NOT_ENABLED = "api-not-enabled", - ERROR = "error", - FETCH_ERROR = "fetch-error", - INVALID_CONTENT = "invalid-content", - INVALID_SCHEMA = "invalid-schema", - NO_API_KEY = "no-api-key", - NO_APP_ID = "no-app-id", - NO_MODEL = "no-model", - NO_PROJECT_ID = "no-project-id", - PARSE_FAILED = "parse-failed", - REQUEST_ERROR = "request-error", - RESPONSE_ERROR = "response-error", - UNSUPPORTED = "unsupported" -} - -export { GenAIErrorCode } - -export { GenAIErrorCode as VertexAIErrorCode } - -// @public -export abstract class GenAIModel { - // @internal - protected constructor(genAI: GenAI, modelName: string); - // @internal (undocumented) - protected _apiSettings: ApiSettings; - readonly model: 
string; - // @internal - static normalizeModelName(modelName: string, backendType: BackendType): string; - } - -// @public -export interface GenAIOptions { - backend: Backend; -} - // @public export interface GenerateContentCandidate { // (undocumented) @@ -389,8 +390,8 @@ export interface GenerativeContentBlob { } // @public -export class GenerativeModel extends GenAIModel { - constructor(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions); +export class GenerativeModel extends AIModel { + constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); countTokens(request: CountTokensRequest | string | Array): Promise; generateContent(request: GenerateContentRequest | string | Array): Promise; generateContentStream(request: GenerateContentRequest | string | Array): Promise; @@ -410,13 +411,13 @@ export class GenerativeModel extends GenAIModel { } // @public -export function getGenAI(app?: FirebaseApp, options?: GenAIOptions): GenAI; +export function getAI(app?: FirebaseApp, options?: AIOptions): AI; // @public -export function getGenerativeModel(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; // @beta -export function getImagenModel(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; +export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; // @public export function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI; @@ -539,8 +540,8 @@ export interface ImagenInlineImage { } // @beta -export class ImagenModel extends GenAIModel { - constructor(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); +export class ImagenModel extends AIModel { + constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); generateImages(prompt: string): Promise>; // @internal generateImagesGCS(prompt: string, gcsURI: string): Promise>; @@ -856,7 +857,7 @@ export interface UsageMetadata { } // @public -export type VertexAI = GenAI; +export type VertexAI = AI; // @public export type VertexAIBackend = { @@ -868,10 +869,10 @@ export type VertexAIBackend = { export function vertexAIBackend(location?: string): VertexAIBackend; // @public -export const VertexAIError: typeof GenAIError; +export const VertexAIError: typeof AIError; // @public -export const VertexAIModel: typeof GenAIModel; +export const VertexAIModel: typeof AIModel; // @public export interface VertexAIOptions { diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml index 19ed87f1e54..03d6b5f6ec7 100644 --- a/docs-devsite/_toc.yaml +++ b/docs-devsite/_toc.yaml @@ -472,6 +472,14 @@ toc: - title: vertexai path: /docs/reference/js/vertexai.md section: + - title: AI + path: /docs/reference/js/vertexai.ai.md + - title: AIError + path: /docs/reference/js/vertexai.aierror.md + - title: AIModel + path: /docs/reference/js/vertexai.aimodel.md + - title: AIOptions + path: /docs/reference/js/vertexai.aioptions.md - title: ArraySchema path: /docs/reference/js/vertexai.arrayschema.md - title: BaseParams @@ -516,14 +524,6 @@ toc: path: /docs/reference/js/vertexai.functionresponse.md - title: FunctionResponsePart path: /docs/reference/js/vertexai.functionresponsepart.md - - title: GenAI - path: /docs/reference/js/vertexai.genai.md - - title: GenAIError - path: 
/docs/reference/js/vertexai.genaierror.md - - title: GenAIModel - path: /docs/reference/js/vertexai.genaimodel.md - - title: GenAIOptions - path: /docs/reference/js/vertexai.genaioptions.md - title: GenerateContentCandidate path: /docs/reference/js/vertexai.generatecontentcandidate.md - title: GenerateContentRequest diff --git a/docs-devsite/index.md b/docs-devsite/index.md index 82fdb36f076..af34d0d0250 100644 --- a/docs-devsite/index.md +++ b/docs-devsite/index.md @@ -27,5 +27,5 @@ https://github.com/firebase/firebase-js-sdk | [@firebase/performance](./performance.md#performance_package) | The Firebase Performance Monitoring Web SDK. This SDK does not work in a Node.js environment. | | [@firebase/remote-config](./remote-config.md#remote-config_package) | The Firebase Remote Config Web SDK. This SDK does not work in a Node.js environment. | | [@firebase/storage](./storage.md#storage_package) | Cloud Storage for Firebase | -| [@firebase/vertexai](./vertexai.md#vertexai_package) | The Vertex AI in Firebase Web SDK. | +| [@firebase/vertexai](./vertexai.md#vertexai_package) | The Firebase AI Web SDK. | diff --git a/docs-devsite/vertexai.ai.md b/docs-devsite/vertexai.ai.md new file mode 100644 index 00000000000..2901c2ccd01 --- /dev/null +++ b/docs-devsite/vertexai.ai.md @@ -0,0 +1,64 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# AI interface +An instance of the Firebase AI SDK. + +Do not create this instance directly. Instead, use [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export interface AI +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [app](./vertexai.ai.md#aiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [AI](./vertexai.ai.md#ai_interface) instance is associated with. | +| [backend](./vertexai.ai.md#aibackend) | [Backend](./vertexai.md#backend) | A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. | +| [location](./vertexai.ai.md#ailocation) | string | The location configured for this AI service instance, relevant for Vertex AI backends. | + +## AI.app + +The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [AI](./vertexai.ai.md#ai_interface) instance is associated with. + +Signature: + +```typescript +app: FirebaseApp; +``` + +## AI.backend + +A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. + +Signature: + +```typescript +backend: Backend; +``` + +## AI.location + +> Warning: This API is now obsolete. +> +> use `AI.backend.location` instead. +> + +The location configured for this AI service instance, relevant for Vertex AI backends. + +Signature: + +```typescript +location: string; +``` diff --git a/docs-devsite/vertexai.genaierror.md b/docs-devsite/vertexai.aierror.md similarity index 52% rename from docs-devsite/vertexai.genaierror.md rename to docs-devsite/vertexai.aierror.md index eed0a54d264..dac50815b6a 100644 --- a/docs-devsite/vertexai.genaierror.md +++ b/docs-devsite/vertexai.aierror.md @@ -9,13 +9,13 @@ overwritten. 
Changes should be made in the source code at https://github.com/firebase/firebase-js-sdk {% endcomment %} -# GenAIError class -Error class for the Vertex AI in Firebase SDK. +# AIError class +Error class for the Firebase AI SDK. Signature: ```typescript -export declare class GenAIError extends FirebaseError +export declare class AIError extends FirebaseError ``` Extends: [FirebaseError](./util.firebaseerror.md#firebaseerror_class) @@ -23,42 +23,42 @@ export declare class GenAIError extends FirebaseError | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(code, message, customErrorData)](./vertexai.genaierror.md#genaierrorconstructor) | | Constructs a new instance of the GenAIError class. | +| [(constructor)(code, message, customErrorData)](./vertexai.aierror.md#aierrorconstructor) | | Constructs a new instance of the AIError class. | ## Properties | Property | Modifiers | Type | Description | | --- | --- | --- | --- | -| [code](./vertexai.genaierror.md#genaierrorcode) | | [GenAIErrorCode](./vertexai.md#genaierrorcode) | | -| [customErrorData](./vertexai.genaierror.md#genaierrorcustomerrordata) | | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | | +| [code](./vertexai.aierror.md#aierrorcode) | | [AIErrorCode](./vertexai.md#aierrorcode) | | +| [customErrorData](./vertexai.aierror.md#aierrorcustomerrordata) | | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | | -## GenAIError.(constructor) +## AIError.(constructor) -Constructs a new instance of the `GenAIError` class. +Constructs a new instance of the `AIError` class. Signature: ```typescript -constructor(code: GenAIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); +constructor(code: AIErrorCode, message: string, customErrorData?: CustomErrorData | undefined); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| code | [GenAIErrorCode](./vertexai.md#genaierrorcode) | The error code from [GenAIErrorCode](./vertexai.md#genaierrorcode). | +| code | [AIErrorCode](./vertexai.md#aierrorcode) | The error code from [AIErrorCode](./vertexai.md#aierrorcode). | | message | string | A human-readable message describing the error. | | customErrorData | [CustomErrorData](./vertexai.customerrordata.md#customerrordata_interface) \| undefined | Optional error data. | -## GenAIError.code +## AIError.code Signature: ```typescript -readonly code: GenAIErrorCode; +readonly code: AIErrorCode; ``` -## GenAIError.customErrorData +## AIError.customErrorData Signature: diff --git a/docs-devsite/vertexai.genaimodel.md b/docs-devsite/vertexai.aimodel.md similarity index 67% rename from docs-devsite/vertexai.genaimodel.md rename to docs-devsite/vertexai.aimodel.md index 3d744a77e28..ee142d27c47 100644 --- a/docs-devsite/vertexai.genaimodel.md +++ b/docs-devsite/vertexai.aimodel.md @@ -9,24 +9,24 @@ overwritten. Changes should be made in the source code at https://github.com/firebase/firebase-js-sdk {% endcomment %} -# GenAIModel class -Base class for Vertex AI in Firebase model APIs. +# AIModel class +Base class for Firebase AI model APIs. -The constructor for this class is marked as internal. Third-party code should not call the constructor directly or create subclasses that extend the `GenAIModel` class. +The constructor for this class is marked as internal. Third-party code should not call the constructor directly or create subclasses that extend the `AIModel` class. 
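For callers migrating from `VertexAIError`, a hedged usage sketch of the renamed error surface: catch `AIError` and branch on `AIErrorCode`. The `firebase/vertexai` import path, the placeholder model name, and the Firebase config stub are illustrative assumptions; the class, property, and enum names come from the API report above.

```typescript
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, AIError, AIErrorCode } from 'firebase/vertexai';

const app = initializeApp({ /* your Firebase config */ });
// Placeholder model name for illustration.
const model = getGenerativeModel(getAI(app), { model: 'gemini-2.0-flash' });

async function run(): Promise<void> {
  try {
    const result = await model.generateContent('Hello!');
    console.log(result.response.text());
  } catch (e) {
    if (e instanceof AIError && e.code === AIErrorCode.UNSUPPORTED) {
      // The selected backend does not support the requested feature.
      console.warn(e.message, e.customErrorData);
    } else {
      throw e;
    }
  }
}
```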
Signature: ```typescript -export declare abstract class GenAIModel +export declare abstract class AIModel ``` ## Properties | Property | Modifiers | Type | Description | | --- | --- | --- | --- | -| [model](./vertexai.genaimodel.md#genaimodelmodel) | | string | The fully qualified model resource name to use for generating images (for example, publishers/google/models/imagen-3.0-generate-002). | +| [model](./vertexai.aimodel.md#aimodelmodel) | | string | The fully qualified model resource name to use for generating images (for example, publishers/google/models/imagen-3.0-generate-002). | -## GenAIModel.model +## AIModel.model The fully qualified model resource name to use for generating images (for example, `publishers/google/models/imagen-3.0-generate-002`). diff --git a/docs-devsite/vertexai.aioptions.md b/docs-devsite/vertexai.aioptions.md new file mode 100644 index 00000000000..4d5e7117740 --- /dev/null +++ b/docs-devsite/vertexai.aioptions.md @@ -0,0 +1,35 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# AIOptions interface +Options interface for initializing the AI service using [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export interface AIOptions +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [backend](./vertexai.aioptions.md#aioptionsbackend) | [Backend](./vertexai.md#backend) | The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. | + +## AIOptions.backend + +The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. + +Signature: + +```typescript +backend: Backend; +``` diff --git a/docs-devsite/vertexai.genai.md b/docs-devsite/vertexai.genai.md deleted file mode 100644 index d8ce617941a..00000000000 --- a/docs-devsite/vertexai.genai.md +++ /dev/null @@ -1,64 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# GenAI interface -An instance of the Firebase GenAI SDK. - -Do not create this instance directly. Instead, use [getGenAI()](./vertexai.md#getgenai_65c48ee). - -Signature: - -```typescript -export interface GenAI -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [app](./vertexai.genai.md#genaiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [GenAI](./vertexai.genai.md#genai_interface) instance is associated with. | -| [backend](./vertexai.genai.md#genaibackend) | [Backend](./vertexai.md#backend) | A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. 
| -| [location](./vertexai.genai.md#genailocation) | string | The location configured for this GenAI service instance, relevant for Vertex AI backends. | - -## GenAI.app - -The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [GenAI](./vertexai.genai.md#genai_interface) instance is associated with. - -Signature: - -```typescript -app: FirebaseApp; -``` - -## GenAI.backend - -A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. - -Signature: - -```typescript -backend: Backend; -``` - -## GenAI.location - -> Warning: This API is now obsolete. -> -> use `GenAI.backend.location` instead. -> - -The location configured for this GenAI service instance, relevant for Vertex AI backends. - -Signature: - -```typescript -location: string; -``` diff --git a/docs-devsite/vertexai.genaioptions.md b/docs-devsite/vertexai.genaioptions.md deleted file mode 100644 index d7f5b499f67..00000000000 --- a/docs-devsite/vertexai.genaioptions.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# GenAIOptions interface -Options interface for initializing the GenAI service using [getGenAI()](./vertexai.md#getgenai_65c48ee). - -Signature: - -```typescript -export interface GenAIOptions -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [backend](./vertexai.genaioptions.md#genaioptionsbackend) | [Backend](./vertexai.md#backend) | The backend configuration to use for the GenAI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. | - -## GenAIOptions.backend - -The backend configuration to use for the GenAI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. - -Signature: - -```typescript -backend: Backend; -``` diff --git a/docs-devsite/vertexai.generativemodel.md b/docs-devsite/vertexai.generativemodel.md index 2cbb56958b1..ba82b65aceb 100644 --- a/docs-devsite/vertexai.generativemodel.md +++ b/docs-devsite/vertexai.generativemodel.md @@ -15,15 +15,15 @@ Class for generative model APIs. 
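Relating the `AIOptions.backend` configuration to the deprecated `location` alias documented above, a brief sketch; the Firebase config stub and the region string are example values, while the factory functions and property names come from this patch.

```typescript
import { initializeApp } from 'firebase/app';
import { getAI, vertexAIBackend, googleAIBackend } from 'firebase/vertexai';

const app = initializeApp({ /* your Firebase config */ });

// Vertex AI backend: the region lives on the backend object, and the
// deprecated AI.location property mirrors it for backwards compatibility.
const vertexAI = getAI(app, { backend: vertexAIBackend('europe-west1') });
console.log(vertexAI.backend.backendType); // 'VERTEX_AI'
console.log(vertexAI.location); // 'europe-west1' (deprecated alias)

// Google AI backend: no region applies, so the deprecated alias is empty.
const googleAI = getAI(app, { backend: googleAIBackend() });
console.log(googleAI.backend.backendType); // 'GOOGLE_AI'
console.log(googleAI.location); // ''
```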
Signature: ```typescript -export declare class GenerativeModel extends GenAIModel +export declare class GenerativeModel extends AIModel ``` -Extends: [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) +Extends: [AIModel](./vertexai.aimodel.md#aimodel_class) ## Constructors | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(genAI, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(ai, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | ## Properties @@ -52,14 +52,14 @@ Constructs a new instance of the `GenerativeModel` class Signature: ```typescript -constructor(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| genAI | [GenAI](./vertexai.genai.md#genai_interface) | | +| ai | [AI](./vertexai.ai.md#ai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | diff --git a/docs-devsite/vertexai.imagenmodel.md b/docs-devsite/vertexai.imagenmodel.md index b6c29dc5d81..e69c49b8572 100644 --- a/docs-devsite/vertexai.imagenmodel.md +++ b/docs-devsite/vertexai.imagenmodel.md @@ -20,15 +20,15 @@ This class provides methods for generating images using the Imagen model. Signature: ```typescript -export declare class ImagenModel extends GenAIModel +export declare class ImagenModel extends AIModel ``` -Extends: [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) +Extends: [AIModel](./vertexai.aimodel.md#aimodel_class) ## Constructors | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(genAI, modelParams, requestOptions)](./vertexai.imagenmodel.md#imagenmodelconstructor) | | (Public Preview) Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class. | +| [(constructor)(ai, modelParams, requestOptions)](./vertexai.imagenmodel.md#imagenmodelconstructor) | | (Public Preview) Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class. | ## Properties @@ -54,14 +54,14 @@ Constructs a new instance of the [ImagenModel](./vertexai.imagenmodel.md#imagenm Signature: ```typescript -constructor(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); +constructor(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions | undefined); ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| genAI | [GenAI](./vertexai.genai.md#genai_interface) | A [GenAI](./vertexai.genai.md#genai_interface) instance. | +| ai | [AI](./vertexai.ai.md#ai_interface) | an [AI](./vertexai.ai.md#ai_interface) instance. | | modelParams | [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface) | Parameters to use when making requests to Imagen. | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) \| undefined | Additional options to use when making requests. | @@ -142,7 +142,7 @@ If the request to generate images fails. This happens if the prompt is blocked. 
```javascript const imagen = new ImagenModel( - genAI, + ai, { model: 'imagen-3.0-generate-002' } diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md index 1b93328851e..544deb2987d 100644 --- a/docs-devsite/vertexai.md +++ b/docs-devsite/vertexai.md @@ -10,20 +10,20 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # vertexai package -The Vertex AI in Firebase Web SDK. +The Firebase AI Web SDK. ## Functions | Function | Description | | --- | --- | | function(app, ...) | -| [getGenAI(app, options)](./vertexai.md#getgenai_65c48ee) | Returns the default [GenAI](./vertexai.genai.md#genai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. | +| [getAI(app, options)](./vertexai.md#getai_a94a413) | Returns the default [AI](./vertexai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. | | [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | | function() | | [googleAIBackend()](./vertexai.md#googleaibackend) | Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. | -| function(genAI, ...) | -| [getGenerativeModel(genAI, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_e3ccf80) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | -| [getImagenModel(genAI, modelParams, requestOptions)](./vertexai.md#getimagenmodel_bffbd6b) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | +| function(ai, ...) | +| [getGenerativeModel(ai, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_80bd839) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | +| [getImagenModel(ai, modelParams, requestOptions)](./vertexai.md#getimagenmodel_e1f6645) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | | function(location, ...) | | [vertexAIBackend(location)](./vertexai.md#vertexaibackend_d0a4534) | Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. | @@ -31,11 +31,11 @@ The Vertex AI in Firebase Web SDK. | Class | Description | | --- | --- | +| [AIError](./vertexai.aierror.md#aierror_class) | Error class for the Firebase AI SDK. | +| [AIModel](./vertexai.aimodel.md#aimodel_class) | Base class for Firebase AI model APIs. | | [ArraySchema](./vertexai.arrayschema.md#arrayschema_class) | Schema class for "array" types. The items param should refer to the type of item that can be a member of the array. | | [BooleanSchema](./vertexai.booleanschema.md#booleanschema_class) | Schema class for "boolean" types. | | [ChatSession](./vertexai.chatsession.md#chatsession_class) | ChatSession class that enables sending chat messages and stores history of sent and received messages so far. | -| [GenAIError](./vertexai.genaierror.md#genaierror_class) | Error class for the Vertex AI in Firebase SDK. 
| -| [GenAIModel](./vertexai.genaimodel.md#genaimodel_class) | Base class for Vertex AI in Firebase model APIs. | | [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) | Class for generative model APIs. | | [ImagenImageFormat](./vertexai.imagenimageformat.md#imagenimageformat_class) | (Public Preview) Defines the image format for images generated by Imagen.Use this class to specify the desired format (JPEG or PNG) and compression quality for images generated by Imagen. This is typically included as part of [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface). | | [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) | (Public Preview) Class for Imagen model APIs.This class provides methods for generating images using the Imagen model. | @@ -49,10 +49,10 @@ The Vertex AI in Firebase Web SDK. | Enumeration | Description | | --- | --- | +| [AIErrorCode](./vertexai.md#aierrorcode) | Standardized error codes that [AIError](./vertexai.aierror.md#aierror_class) can have. | | [BlockReason](./vertexai.md#blockreason) | Reason that a prompt was blocked. | | [FinishReason](./vertexai.md#finishreason) | Reason that a candidate finished. | | [FunctionCallingMode](./vertexai.md#functioncallingmode) | | -| [GenAIErrorCode](./vertexai.md#genaierrorcode) | Standardized error codes that [GenAIError](./vertexai.genaierror.md#genaierror_class) can have. | | [HarmBlockMethod](./vertexai.md#harmblockmethod) | This property is not supported in Google AI. | | [HarmBlockThreshold](./vertexai.md#harmblockthreshold) | Threshold above which a prompt or candidate will be blocked. | | [HarmCategory](./vertexai.md#harmcategory) | Harm categories that would cause prompts or candidates to be blocked. | @@ -68,6 +68,8 @@ The Vertex AI in Firebase Web SDK. | Interface | Description | | --- | --- | +| [AI](./vertexai.ai.md#ai_interface) | An instance of the Firebase AI SDK.Do not create this instance directly. Instead, use [getAI()](./vertexai.md#getai_a94a413). | +| [AIOptions](./vertexai.aioptions.md#aioptions_interface) | Options interface for initializing the AI service using [getAI()](./vertexai.md#getai_a94a413). | | [BaseParams](./vertexai.baseparams.md#baseparams_interface) | Base parameters for a number of methods. | | [Citation](./vertexai.citation.md#citation_interface) | A single citation. | | [CitationMetadata](./vertexai.citationmetadata.md#citationmetadata_interface) | Citation metadata that may be found on a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface). | @@ -87,8 +89,6 @@ The Vertex AI in Firebase Web SDK. | [FunctionDeclarationsTool](./vertexai.functiondeclarationstool.md#functiondeclarationstool_interface) | A FunctionDeclarationsTool is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. | | [FunctionResponse](./vertexai.functionresponse.md#functionresponse_interface) | The result output from a [FunctionCall](./vertexai.functioncall.md#functioncall_interface) that contains a string representing the [FunctionDeclaration.name](./vertexai.functiondeclaration.md#functiondeclarationname) and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall](./vertexai.functioncall.md#functioncall_interface) made based on model prediction. 
| | [FunctionResponsePart](./vertexai.functionresponsepart.md#functionresponsepart_interface) | Content part interface if the part represents [FunctionResponse](./vertexai.functionresponse.md#functionresponse_interface). | -| [GenAI](./vertexai.genai.md#genai_interface) | An instance of the Firebase GenAI SDK.Do not create this instance directly. Instead, use [getGenAI()](./vertexai.md#getgenai_65c48ee). | -| [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) | Options interface for initializing the GenAI service using [getGenAI()](./vertexai.md#getgenai_65c48ee). | | [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | A candidate returned as part of a [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface). | | [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) | Request sent through [GenerativeModel.generateContent()](./vertexai.generativemodel.md#generativemodelgeneratecontent) | | [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface) | Individual response from [GenerativeModel.generateContent()](./vertexai.generativemodel.md#generativemodelgeneratecontent) and [GenerativeModel.generateContentStream()](./vertexai.generativemodel.md#generativemodelgeneratecontentstream). generateContentStream() will return one in each chunk until the stream is done. | @@ -106,10 +106,10 @@ The Vertex AI in Firebase Web SDK. | [ImagenSafetySettings](./vertexai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. | | [InlineDataPart](./vertexai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. | | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | -| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). | +| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | | [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. | | [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | -| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). | +| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | | [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | | | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. 
| @@ -122,7 +122,7 @@ The Vertex AI in Firebase Web SDK. | [TextPart](./vertexai.textpart.md#textpart_interface) | Content part interface if the part represents a text string. | | [ToolConfig](./vertexai.toolconfig.md#toolconfig_interface) | Tool config. This config is shared for all tools provided in the request. | | [UsageMetadata](./vertexai.usagemetadata.md#usagemetadata_interface) | Usage metadata about a [GenerateContentResponse](./vertexai.generatecontentresponse.md#generatecontentresponse_interface). | -| [VertexAIOptions](./vertexai.vertexaioptions.md#vertexaioptions_interface) | Options when initializing the Vertex AI in Firebase SDK. | +| [VertexAIOptions](./vertexai.vertexaioptions.md#vertexaioptions_interface) | Options when initializing the Firebase AI SDK. | | [VideoMetadata](./vertexai.videometadata.md#videometadata_interface) | Describes the input video content. | | [WebAttribution](./vertexai.webattribution.md#webattribution_interface) | | @@ -130,35 +130,35 @@ The Vertex AI in Firebase Web SDK. | Variable | Description | | --- | --- | -| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase GenAI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. | +| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase AI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. | | [POSSIBLE\_ROLES](./vertexai.md#possible_roles) | Possible roles. | -| [VertexAIError](./vertexai.md#vertexaierror) | Error class for the Vertex AI in Firebase SDK.For more information, refer to the documentation for the new [GenAIError](./vertexai.genaierror.md#genaierror_class). | -| [VertexAIModel](./vertexai.md#vertexaimodel) | Base class for Vertex AI in Firebase model APIs.For more information, refer to the documentation for the new [GenAIModel](./vertexai.genaimodel.md#genaimodel_class). | +| [VertexAIError](./vertexai.md#vertexaierror) | Error class for the Firebase AI SDK.For more information, refer to the documentation for the new [AIError](./vertexai.aierror.md#aierror_class). | +| [VertexAIModel](./vertexai.md#vertexaimodel) | Base class for Firebase AI model APIs.For more information, refer to the documentation for the new [AIModel](./vertexai.aimodel.md#aimodel_class). | ## Type Aliases | Type Alias | Description | | --- | --- | -| [Backend](./vertexai.md#backend) | Union type representing the backend configuration for the GenAI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object.Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). | +| [Backend](./vertexai.md#backend) | Union type representing the backend configuration for the AI service. 
This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object.Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). | | [BackendType](./vertexai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. | -| [GoogleAIBackend](./vertexai.md#googleaibackend) | Represents the configuration object for the Google AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the service with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). | +| [GoogleAIBackend](./vertexai.md#googleaibackend) | Represents the configuration object for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). | | [Part](./vertexai.md#part) | Content part - includes text, image/video, or function call/response part types. | | [Role](./vertexai.md#role) | Role is the producer of the content. | | [Tool](./vertexai.md#tool) | Defines a tool that model can call to access external knowledge. | | [TypedSchema](./vertexai.md#typedschema) | A type that includes all specific Schema types. | -| [VertexAI](./vertexai.md#vertexai) | An instance of the Vertex AI in Firebase SDK.For more information, refer to the documentation for the new [GenAI](./vertexai.genai.md#genai_interface). | -| [VertexAIBackend](./vertexai.md#vertexaibackend) | Represents the configuration object for the Vertex AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the server with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. | +| [VertexAI](./vertexai.md#vertexai) | An instance of the Firebase AI SDK.For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface). | +| [VertexAIBackend](./vertexai.md#vertexaibackend) | Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. | ## function(app, ...) -### getGenAI(app, options) {:#getgenai_65c48ee} +### getAI(app, options) {:#getai_a94a413} -Returns the default [GenAI](./vertexai.genai.md#genai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. +Returns the default [AI](./vertexai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. 
Signature: ```typescript -export declare function getGenAI(app?: FirebaseApp, options?: GenAIOptions): GenAI; +export declare function getAI(app?: FirebaseApp, options?: AIOptions): AI; ``` #### Parameters @@ -166,19 +166,19 @@ export declare function getGenAI(app?: FirebaseApp, options?: GenAIOptions): Gen | Parameter | Type | Description | | --- | --- | --- | | app | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) to use. | -| options | [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) | [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) that configure the GenAI instance. | +| options | [AIOptions](./vertexai.aioptions.md#aioptions_interface) | [AIOptions](./vertexai.aioptions.md#aioptions_interface) that configure the AI instance. | Returns: -[GenAI](./vertexai.genai.md#genai_interface) +[AI](./vertexai.ai.md#ai_interface) -The default [GenAI](./vertexai.genai.md#genai_interface) instance for the given [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). +The default [AI](./vertexai.ai.md#ai_interface) instance for the given [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). ### Example 1 ```javascript -const genAI = getGenAI(app); +const ai = getAI(app); ``` @@ -186,8 +186,8 @@ const genAI = getGenAI(app); ```javascript -// Get a GenAI instance configured to use Google AI. -const genAI = getGenAI(app, { backend: googleAIBackend() }); +// Get an AI instance configured to use Google AI. +const ai = getAI(app, { backend: googleAIBackend() }); ``` @@ -195,8 +195,8 @@ const genAI = getGenAI(app, { backend: googleAIBackend() }); ```javascript -// Get a GenAI instance configured to use Vertex AI. -const genAI = getGenAI(app, { backend: vertexAIBackend() }); +// Get an AI instance configured to use Vertex AI. +const ai = getAI(app, { backend: vertexAIBackend() }); ``` @@ -238,23 +238,23 @@ export declare function googleAIBackend(): GoogleAIBackend; A [GoogleAIBackend](./vertexai.md#googleaibackend) object. -## function(genAI, ...) +## function(ai, ...) -### getGenerativeModel(genAI, modelParams, requestOptions) {:#getgenerativemodel_e3ccf80} +### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_80bd839} Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. Signature: ```typescript -export declare function getGenerativeModel(genAI: GenAI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| genAI | [GenAI](./vertexai.genai.md#genai_interface) | | +| ai | [AI](./vertexai.ai.md#ai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | @@ -262,7 +262,7 @@ export declare function getGenerativeModel(genAI: GenAI, modelParams: ModelParam [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) -### getImagenModel(genAI, modelParams, requestOptions) {:#getimagenmodel_bffbd6b} +### getImagenModel(ai, modelParams, requestOptions) {:#getimagenmodel_e1f6645} > This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. 
> @@ -274,14 +274,14 @@ Only Imagen 3 models (named `imagen-3.0-*`) are supported. Signature: ```typescript -export declare function getImagenModel(genAI: GenAI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; +export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; ``` #### Parameters | Parameter | Type | Description | | --- | --- | --- | -| genAI | [GenAI](./vertexai.genai.md#genai_interface) | A [GenAI](./vertexai.genai.md#genai_interface) instance. | +| ai | [AI](./vertexai.ai.md#ai_interface) | An [AI](./vertexai.ai.md#ai_interface) instance. | | modelParams | [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface) | Parameters to use when making Imagen requests. | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Additional options to use when making requests. | @@ -319,7 +319,7 @@ A [VertexAIBackend](./vertexai.md#vertexaibackend) object. ## BackendType -An enum-like object containing constants that represent the supported backends for the Firebase GenAI SDK. +An enum-like object containing constants that represent the supported backends for the Firebase AI SDK. These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. @@ -344,31 +344,31 @@ POSSIBLE_ROLES: readonly ["user", "model", "function", "system"] ## VertexAIError -Error class for the Vertex AI in Firebase SDK. +Error class for the Firebase AI SDK. -For more information, refer to the documentation for the new [GenAIError](./vertexai.genaierror.md#genaierror_class). +For more information, refer to the documentation for the new [AIError](./vertexai.aierror.md#aierror_class). Signature: ```typescript -VertexAIError: typeof GenAIError +VertexAIError: typeof AIError ``` ## VertexAIModel -Base class for Vertex AI in Firebase model APIs. +Base class for Firebase AI model APIs. -For more information, refer to the documentation for the new [GenAIModel](./vertexai.genaimodel.md#genaimodel_class). +For more information, refer to the documentation for the new [AIModel](./vertexai.aimodel.md#aimodel_class). Signature: ```typescript -VertexAIModel: typeof GenAIModel +VertexAIModel: typeof AIModel ``` ## Backend -Union type representing the backend configuration for the GenAI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object. +Union type representing the backend configuration for the AI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object. Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). @@ -390,7 +390,7 @@ export type BackendType = (typeof BackendType)[keyof typeof BackendType]; ## GoogleAIBackend -Represents the configuration object for the Google AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the service with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). +Represents the configuration object for the Google AI backend. 
Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend).

Signature:

@@ -442,19 +442,19 @@ export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanS

## VertexAI

-An instance of the Vertex AI in Firebase SDK.
+An instance of the Firebase AI SDK.

-For more information, refer to the documentation for the new [GenAI](./vertexai.genai.md#genai_interface).
+For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface).

Signature:

```typescript
-export type VertexAI = GenAI;
+export type VertexAI = AI;
```

## VertexAIBackend

-Represents the configuration object for the Vertex AI backend. Use this with [GenAIOptions](./vertexai.genaioptions.md#genaioptions_interface) when initializing the server with [getGenAI()](./vertexai.md#getgenai_65c48ee). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function.
+Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using the [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function.

Signature:

@@ -465,6 +465,34 @@ export type VertexAIBackend = {
};
```

+## AIErrorCode
+
+Standardized error codes that [AIError](./vertexai.aierror.md#aierror_class) can have.
+
+Signature:
+
+```typescript
+export declare const enum AIErrorCode
+```
+
+## Enumeration Members
+
+| Member | Value | Description |
+| --- | --- | --- |
+| API\_NOT\_ENABLED | "api-not-enabled" | An error due to the Firebase API not being enabled in the Console. |
+| ERROR | "error" | A generic error occurred. |
+| FETCH\_ERROR | "fetch-error" | An error occurred while performing a fetch. |
+| INVALID\_CONTENT | "invalid-content" | An error associated with a Content object. |
+| INVALID\_SCHEMA | "invalid-schema" | An error due to invalid Schema input. |
+| NO\_API\_KEY | "no-api-key" | An error occurred due to a missing Firebase API key. |
+| NO\_APP\_ID | "no-app-id" | An error occurred due to a missing Firebase app ID. |
+| NO\_MODEL | "no-model" | An error occurred due to a model name not being specified during initialization. |
+| NO\_PROJECT\_ID | "no-project-id" | An error occurred due to a missing project ID. |
+| PARSE\_FAILED | "parse-failed" | An error occurred while parsing. |
+| REQUEST\_ERROR | "request-error" | An error occurred in a request. |
+| RESPONSE\_ERROR | "response-error" | An error occurred in a response. |
+| UNSUPPORTED | "unsupported" | An error occurred due to an attempt to use an unsupported feature. |
+
## BlockReason

Reason that a prompt was blocked.
@@ -525,34 +553,6 @@ export declare enum FunctionCallingMode
| AUTO | "AUTO" | Default model behavior; model decides to predict either a function call or a natural language response. |
| NONE | "NONE" | Model will not predict any function call. Model behavior is same as when not passing any function declarations. |

-## GenAIErrorCode
-
-Standardized error codes that [GenAIError](./vertexai.genaierror.md#genaierror_class) can have.
-
-Signature:
-
-```typescript
-export declare const enum GenAIErrorCode
-```
-
-## Enumeration Members
-
-| Member | Value | Description |
-| --- | --- | --- |
-| API\_NOT\_ENABLED | "api-not-enabled" | An error due to the Firebase API not being enabled in the Console.
| -| ERROR | "error" | A generic error occurred. | -| FETCH\_ERROR | "fetch-error" | An error occurred while performing a fetch. | -| INVALID\_CONTENT | "invalid-content" | An error associated with a Content object. | -| INVALID\_SCHEMA | "invalid-schema" | An error due to invalid Schema input. | -| NO\_API\_KEY | "no-api-key" | An error occurred due to a missing Firebase API key. | -| NO\_APP\_ID | "no-app-id" | An error occured due to a missing Firebase app ID. | -| NO\_MODEL | "no-model" | An error occurred due to a model name not being specified during initialization. | -| NO\_PROJECT\_ID | "no-project-id" | An error occurred due to a missing project ID. | -| PARSE\_FAILED | "parse-failed" | An error occurred while parsing. | -| REQUEST\_ERROR | "request-error" | An error occurred in a request. | -| RESPONSE\_ERROR | "response-error" | An error occurred in a response. | -| UNSUPPORTED | "unsupported" | An error occured due an attempt to use an unsupported feature. | - ## HarmBlockMethod This property is not supported in Google AI. diff --git a/docs-devsite/vertexai.modelparams.md b/docs-devsite/vertexai.modelparams.md index f25f37e4dd6..bb8a87d5fb2 100644 --- a/docs-devsite/vertexai.modelparams.md +++ b/docs-devsite/vertexai.modelparams.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # ModelParams interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). Signature: diff --git a/docs-devsite/vertexai.requestoptions.md b/docs-devsite/vertexai.requestoptions.md index ffedaa69859..3c233d72b90 100644 --- a/docs-devsite/vertexai.requestoptions.md +++ b/docs-devsite/vertexai.requestoptions.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # RequestOptions interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_e3ccf80). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). Signature: diff --git a/docs-devsite/vertexai.safetysetting.md b/docs-devsite/vertexai.safetysetting.md index 5a837e8aeff..92f98e3c02a 100644 --- a/docs-devsite/vertexai.safetysetting.md +++ b/docs-devsite/vertexai.safetysetting.md @@ -23,7 +23,7 @@ export interface SafetySetting | Property | Type | Description | | --- | --- | --- | | [category](./vertexai.safetysetting.md#safetysettingcategory) | [HarmCategory](./vertexai.md#harmcategory) | | -| [method](./vertexai.safetysetting.md#safetysettingmethod) | [HarmBlockMethod](./vertexai.md#harmblockmethod) | This property is not supported in Google AI. If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, a [GenAIError](./vertexai.genaierror.md#genaierror_class) will be thrown. | +| [method](./vertexai.safetysetting.md#safetysettingmethod) | [HarmBlockMethod](./vertexai.md#harmblockmethod) | This property is not supported in Google AI. If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, an [AIError](./vertexai.aierror.md#aierror_class) will be thrown. | | [threshold](./vertexai.safetysetting.md#safetysettingthreshold) | [HarmBlockThreshold](./vertexai.md#harmblockthreshold) | | ## SafetySetting.category @@ -36,7 +36,7 @@ category: HarmCategory; ## SafetySetting.method -This property is not supported in Google AI. 
If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, a [GenAIError](./vertexai.genaierror.md#genaierror_class) will be thrown. +This property is not supported in Google AI. If this is a property on a [GenerateContentRequest](./vertexai.generatecontentrequest.md#generatecontentrequest_interface) to be sent, an [AIError](./vertexai.aierror.md#aierror_class) will be thrown. Signature: diff --git a/docs-devsite/vertexai.vertexaioptions.md b/docs-devsite/vertexai.vertexaioptions.md index e15b525bfed..776dfd29374 100644 --- a/docs-devsite/vertexai.vertexaioptions.md +++ b/docs-devsite/vertexai.vertexaioptions.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # VertexAIOptions interface -Options when initializing the Vertex AI in Firebase SDK. +Options when initializing the Firebase AI SDK. Signature: diff --git a/packages/firebase/package.json b/packages/firebase/package.json index f47e3378ee2..e1609b07176 100644 --- a/packages/firebase/package.json +++ b/packages/firebase/package.json @@ -227,7 +227,7 @@ }, "default": "./storage/dist/esm/index.esm.js" }, - "./genai": { + "./ai": { "types": "./vertexai/dist/vertexai/index.d.ts", "node": { "require": "./vertexai/dist/index.cjs.js", diff --git a/packages/vertexai/src/api.test.ts b/packages/vertexai/src/api.test.ts index dc04b918dd9..bf85c557b1e 100644 --- a/packages/vertexai/src/api.test.ts +++ b/packages/vertexai/src/api.test.ts @@ -14,8 +14,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -import { ImagenModelParams, ModelParams, GenAIErrorCode } from './types'; -import { GenAIError } from './errors'; +import { ImagenModelParams, ModelParams, AIErrorCode } from './types'; +import { AIError } from './errors'; import { ImagenModel, getGenerativeModel, @@ -24,11 +24,11 @@ import { vertexAIBackend } from './api'; import { expect } from 'chai'; -import { BackendType, GenAI } from './public-types'; +import { BackendType, AI } from './public-types'; import { GenerativeModel } from './models/generative-model'; import { DEFAULT_LOCATION } from './constants'; -const fakeGenAI: GenAI = { +const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -45,129 +45,129 @@ const fakeGenAI: GenAI = { describe('Top level API', () => { it('getGenerativeModel throws if no model is provided', () => { try { - getGenerativeModel(fakeGenAI, {} as ModelParams); + getGenerativeModel(fakeAI, {} as ModelParams); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_MODEL); - expect((e as GenAIError).message).includes( + expect((e as AIError).code).includes(AIErrorCode.NO_MODEL); + expect((e as AIError).message).includes( `VertexAI: Must provide a model name. 
Example: ` + - `getGenerativeModel({ model: 'my-model-name' }) (vertexAI/${GenAIErrorCode.NO_MODEL})` + `getGenerativeModel({ model: 'my-model-name' }) (vertexAI/${AIErrorCode.NO_MODEL})` ); } }); it('getGenerativeModel throws if no apiKey is provided', () => { const fakeVertexNoApiKey = { - ...fakeGenAI, + ...fakeAI, app: { options: { projectId: 'my-project', appId: 'my-appid' } } - } as GenAI; + } as AI; try { getGenerativeModel(fakeVertexNoApiKey, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_API_KEY); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_API_KEY); + expect((e as AIError).message).equals( `VertexAI: The "apiKey" field is empty in the local ` + - `Firebase config. Firebase VertexAI requires this field to` + - ` contain a valid API key. (vertexAI/${GenAIErrorCode.NO_API_KEY})` + `Firebase config. Firebase AI requires this field to` + + ` contain a valid API key. (vertexAI/${AIErrorCode.NO_API_KEY})` ); } }); it('getGenerativeModel throws if no projectId is provided', () => { const fakeVertexNoProject = { - ...fakeGenAI, + ...fakeAI, app: { options: { apiKey: 'my-key', appId: 'my-appid' } } - } as GenAI; + } as AI; try { getGenerativeModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_PROJECT_ID); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_PROJECT_ID); + expect((e as AIError).message).equals( `VertexAI: The "projectId" field is empty in the local` + - ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid project ID. (vertexAI/${GenAIErrorCode.NO_PROJECT_ID})` + ` Firebase config. Firebase AI requires this field ` + + `to contain a valid project ID. (vertexAI/${AIErrorCode.NO_PROJECT_ID})` ); } }); it('getGenerativeModel throws if no appId is provided', () => { const fakeVertexNoProject = { - ...fakeGenAI, + ...fakeAI, app: { options: { apiKey: 'my-key', projectId: 'my-projectid' } } - } as GenAI; + } as AI; try { getGenerativeModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_APP_ID); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_APP_ID); + expect((e as AIError).message).equals( `VertexAI: The "appId" field is empty in the local` + - ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid app ID. (vertexAI/${GenAIErrorCode.NO_APP_ID})` + ` Firebase config. Firebase AI requires this field ` + + `to contain a valid app ID. (vertexAI/${AIErrorCode.NO_APP_ID})` ); } }); it('getGenerativeModel gets a GenerativeModel', () => { - const genModel = getGenerativeModel(fakeGenAI, { model: 'my-model' }); + const genModel = getGenerativeModel(fakeAI, { model: 'my-model' }); expect(genModel).to.be.an.instanceOf(GenerativeModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); it('getImagenModel throws if no model is provided', () => { try { - getImagenModel(fakeGenAI, {} as ImagenModelParams); + getImagenModel(fakeAI, {} as ImagenModelParams); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_MODEL); - expect((e as GenAIError).message).includes( + expect((e as AIError).code).includes(AIErrorCode.NO_MODEL); + expect((e as AIError).message).includes( `VertexAI: Must provide a model name. 
Example: ` + - `getImagenModel({ model: 'my-model-name' }) (vertexAI/${GenAIErrorCode.NO_MODEL})` + `getImagenModel({ model: 'my-model-name' }) (vertexAI/${AIErrorCode.NO_MODEL})` ); } }); it('getImagenModel throws if no apiKey is provided', () => { const fakeVertexNoApiKey = { - ...fakeGenAI, + ...fakeAI, app: { options: { projectId: 'my-project', appId: 'my-appid' } } - } as GenAI; + } as AI; try { getImagenModel(fakeVertexNoApiKey, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_API_KEY); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_API_KEY); + expect((e as AIError).message).equals( `VertexAI: The "apiKey" field is empty in the local ` + - `Firebase config. Firebase VertexAI requires this field to` + - ` contain a valid API key. (vertexAI/${GenAIErrorCode.NO_API_KEY})` + `Firebase config. Firebase AI requires this field to` + + ` contain a valid API key. (vertexAI/${AIErrorCode.NO_API_KEY})` ); } }); it('getImagenModel throws if no projectId is provided', () => { const fakeVertexNoProject = { - ...fakeGenAI, + ...fakeAI, app: { options: { apiKey: 'my-key', appId: 'my-appid' } } - } as GenAI; + } as AI; try { getImagenModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_PROJECT_ID); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_PROJECT_ID); + expect((e as AIError).message).equals( `VertexAI: The "projectId" field is empty in the local` + - ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid project ID. (vertexAI/${GenAIErrorCode.NO_PROJECT_ID})` + ` Firebase config. Firebase AI requires this field ` + + `to contain a valid project ID. (vertexAI/${AIErrorCode.NO_PROJECT_ID})` ); } }); it('getImagenModel throws if no appId is provided', () => { const fakeVertexNoProject = { - ...fakeGenAI, + ...fakeAI, app: { options: { apiKey: 'my-key', projectId: 'my-project' } } - } as GenAI; + } as AI; try { getImagenModel(fakeVertexNoProject, { model: 'my-model' }); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.NO_APP_ID); - expect((e as GenAIError).message).equals( + expect((e as AIError).code).includes(AIErrorCode.NO_APP_ID); + expect((e as AIError).message).equals( `VertexAI: The "appId" field is empty in the local` + - ` Firebase config. Firebase VertexAI requires this field ` + - `to contain a valid app ID. (vertexAI/${GenAIErrorCode.NO_APP_ID})` + ` Firebase config. Firebase AI requires this field ` + + `to contain a valid app ID. 
(vertexAI/${AIErrorCode.NO_APP_ID})` ); } }); it('getImagenModel gets an ImagenModel', () => { - const genModel = getImagenModel(fakeGenAI, { model: 'my-model' }); + const genModel = getImagenModel(fakeAI, { model: 'my-model' }); expect(genModel).to.be.an.instanceOf(ImagenModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index 11f9dbcf56c..d5e641a582c 100644 --- a/packages/vertexai/src/api.ts +++ b/packages/vertexai/src/api.ts @@ -18,12 +18,12 @@ import { FirebaseApp, getApp, _getProvider } from '@firebase/app'; import { Provider } from '@firebase/component'; import { getModularInstance } from '@firebase/util'; -import { DEFAULT_LOCATION, GENAI_TYPE } from './constants'; -import { GenAIService } from './service'; +import { DEFAULT_LOCATION, AI_TYPE } from './constants'; +import { AIService } from './service'; import { BackendType, - GenAI, - GenAIOptions, + AI, + AIOptions, GoogleAIBackend, VertexAI, VertexAIBackend, @@ -33,44 +33,46 @@ import { ImagenModelParams, ModelParams, RequestOptions, - GenAIErrorCode + AIErrorCode } from './types'; -import { GenAIError } from './errors'; -import { GenAIModel, GenerativeModel, ImagenModel } from './models'; +import { AIError } from './errors'; +import { AIModel, GenerativeModel, ImagenModel } from './models'; import { encodeInstanceIdentifier } from './helpers'; export { ChatSession } from './methods/chat-session'; export * from './requests/schema-builder'; export { ImagenImageFormat } from './requests/imagen-image-format'; -export { GenAIModel, GenerativeModel, ImagenModel, GenAIError }; +export { AIModel, GenerativeModel, ImagenModel, AIError }; -export { GenAIErrorCode as VertexAIErrorCode }; +export { AIErrorCode as VertexAIErrorCode }; /** - * Base class for Vertex AI in Firebase model APIs. + * Base class for Firebase AI model APIs. * - * For more information, refer to the documentation for the new {@link GenAIModel}. + * For more information, refer to the documentation for the new {@link AIModel}. * * @public */ -export const VertexAIModel = GenAIModel; +export const VertexAIModel = AIModel; /** - * Error class for the Vertex AI in Firebase SDK. + * Error class for the Firebase AI SDK. * - * For more information, refer to the documentation for the new {@link GenAIError}. + * For more information, refer to the documentation for the new {@link AIError}. * * @public */ -export const VertexAIError = GenAIError; +export const VertexAIError = AIError; declare module '@firebase/component' { interface NameServiceMapping { - [GENAI_TYPE]: GenAIService; + [AI_TYPE]: AIService; } } /** + * It is recommended to use the new {@link getAI | getAI()}. + * * Returns a {@link VertexAI} instance for the given app. * * @public @@ -83,55 +85,55 @@ export function getVertexAI( ): VertexAI { app = getModularInstance(app); // Dependencies - const genAIProvider: Provider<'genAI'> = _getProvider(app, GENAI_TYPE); + const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE); const identifier = encodeInstanceIdentifier({ backendType: BackendType.VERTEX_AI, location: options?.location ?? DEFAULT_LOCATION }); - return genAIProvider.getImmediate({ + return AIProvider.getImmediate({ identifier }); } /** - * Returns the default {@link GenAI} instance that is associated with the provided + * Returns the default {@link AI} instance that is associated with the provided * {@link @firebase/app#FirebaseApp}. 
If no instance exists, initializes a new instance with the * default settings. * * @example * ```javascript - * const genAI = getGenAI(app); + * const ai = getAI(app); * ``` * * @example * ```javascript - * // Get a GenAI instance configured to use Google AI. - * const genAI = getGenAI(app, { backend: googleAIBackend() }); + * // Get an AI instance configured to use Google AI. + * const ai = getAI(app, { backend: googleAIBackend() }); * ``` * * @example * ```javascript - * // Get a GenAI instance configured to use Vertex AI. - * const genAI = getGenAI(app, { backend: vertexAIBackend() }); + * // Get an AI instance configured to use Vertex AI. + * const ai = getAI(app, { backend: vertexAIBackend() }); * ``` * * @param app - The {@link @firebase/app#FirebaseApp} to use. - * @param options - {@link GenAIOptions} that configure the GenAI instance. - * @returns The default {@link GenAI} instance for the given {@link @firebase/app#FirebaseApp}. + * @param options - {@link AIOptions} that configure the AI instance. + * @returns The default {@link AI} instance for the given {@link @firebase/app#FirebaseApp}. * * @public */ -export function getGenAI( +export function getAI( app: FirebaseApp = getApp(), - options: GenAIOptions = { backend: googleAIBackend() } -): GenAI { + options: AIOptions = { backend: googleAIBackend() } +): AI { app = getModularInstance(app); // Dependencies - const genAIProvider: Provider<'genAI'> = _getProvider(app, GENAI_TYPE); + const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE); const identifier = encodeInstanceIdentifier(options.backend); - return genAIProvider.getImmediate({ + return AIProvider.getImmediate({ identifier }); } @@ -177,17 +179,17 @@ export function vertexAIBackend(location?: string): VertexAIBackend { * @public */ export function getGenerativeModel( - genAI: GenAI, + ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions ): GenerativeModel { if (!modelParams.model) { - throw new GenAIError( - GenAIErrorCode.NO_MODEL, + throw new AIError( + AIErrorCode.NO_MODEL, `Must provide a model name. Example: getGenerativeModel({ model: 'my-model-name' })` ); } - return new GenerativeModel(genAI, modelParams, requestOptions); + return new GenerativeModel(ai, modelParams, requestOptions); } /** @@ -195,7 +197,7 @@ export function getGenerativeModel( * * Only Imagen 3 models (named `imagen-3.0-*`) are supported. * - * @param genAI - A {@link GenAI} instance. + * @param ai - An {@link AI} instance. * @param modelParams - Parameters to use when making Imagen requests. * @param requestOptions - Additional options to use when making requests. * @@ -205,15 +207,15 @@ export function getGenerativeModel( * @beta */ export function getImagenModel( - genAI: GenAI, + ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions ): ImagenModel { if (!modelParams.model) { - throw new GenAIError( - GenAIErrorCode.NO_MODEL, + throw new AIError( + AIErrorCode.NO_MODEL, `Must provide a model name. 
Example: getImagenModel({ model: 'my-model-name' })` ); } - return new ImagenModel(genAI, modelParams, requestOptions); + return new ImagenModel(ai, modelParams, requestOptions); } diff --git a/packages/vertexai/src/backwards-compatbility.test.ts b/packages/vertexai/src/backwards-compatbility.test.ts index 23d0511445a..7f96328b305 100644 --- a/packages/vertexai/src/backwards-compatbility.test.ts +++ b/packages/vertexai/src/backwards-compatbility.test.ts @@ -17,8 +17,8 @@ import { expect } from 'chai'; import { - GenAIError, - GenAIModel, + AIError, + AIModel, GenerativeModel, VertexAIError, VertexAIErrorCode, @@ -27,11 +27,11 @@ import { getImagenModel, vertexAIBackend } from './api'; -import { GenAI, VertexAI, GenAIErrorCode } from './public-types'; +import { AI, VertexAI, AIErrorCode } from './public-types'; function assertAssignable(): void {} -const fakeGenAI: GenAI = { +const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -45,28 +45,28 @@ const fakeGenAI: GenAI = { location: 'us-central1' }; -const fakeVertexAI: VertexAI = fakeGenAI; +const fakeVertexAI: VertexAI = fakeAI; describe('backwards-compatible types', () => { - it('GenAI is backwards compatible with VertexAI', () => { - assertAssignable(); + it('AI is backwards compatible with VertexAI', () => { + assertAssignable(); }); - it('GenAIError is backwards compatible with VertexAIError', () => { - assertAssignable(); + it('AIError is backwards compatible with VertexAIError', () => { + assertAssignable(); const err = new VertexAIError(VertexAIErrorCode.ERROR, ''); - expect(err).instanceOf(GenAIError); + expect(err).instanceOf(AIError); expect(err).instanceOf(VertexAIError); }); - it('GenAIErrorCode is backwards compatible with VertexAIErrorCode', () => { - assertAssignable(); - const errCode = GenAIErrorCode.ERROR; + it('AIErrorCode is backwards compatible with VertexAIErrorCode', () => { + assertAssignable(); + const errCode = AIErrorCode.ERROR; expect(errCode).to.equal(VertexAIErrorCode.ERROR); }); - it('GenAIModel is backwards compatible with VertexAIModel', () => { - assertAssignable(); + it('AIModel is backwards compatible with VertexAIModel', () => { + assertAssignable(); - const model = new GenerativeModel(fakeGenAI, { model: 'model-name' }); - expect(model).to.be.instanceOf(GenAIModel); + const model = new GenerativeModel(fakeAI, { model: 'model-name' }); + expect(model).to.be.instanceOf(AIModel); expect(model).to.be.instanceOf(VertexAIModel); }); }); @@ -74,12 +74,12 @@ describe('backwards-compatible types', () => { describe('backward-compatible functions', () => { it('getGenerativeModel', () => { const model = getGenerativeModel(fakeVertexAI, { model: 'model-name' }); - expect(model).to.be.instanceOf(GenAIModel); + expect(model).to.be.instanceOf(AIModel); expect(model).to.be.instanceOf(VertexAIModel); }); it('getImagenModel', () => { const model = getImagenModel(fakeVertexAI, { model: 'model-name' }); - expect(model).to.be.instanceOf(GenAIModel); + expect(model).to.be.instanceOf(AIModel); expect(model).to.be.instanceOf(VertexAIModel); }); }); diff --git a/packages/vertexai/src/constants.ts b/packages/vertexai/src/constants.ts index 4034e410003..8bb46222ddd 100644 --- a/packages/vertexai/src/constants.ts +++ b/packages/vertexai/src/constants.ts @@ -22,9 +22,9 @@ import { InstanceIdentifier } from './types/internal'; // TODO (v12): Remove this export const VERTEX_TYPE = 'vertexAI'; -export const GENAI_TYPE = 'genAI'; +export const AI_TYPE = 'AI'; -export const DEFAULT_INSTANCE_IDENTIFER: 
InstanceIdentifier = { +export const DEFAULT_INSTANCE_IDENTIFIER: InstanceIdentifier = { backendType: BackendType.GOOGLE_AI }; diff --git a/packages/vertexai/src/errors.ts b/packages/vertexai/src/errors.ts index c836e49d51e..85180fe8381 100644 --- a/packages/vertexai/src/errors.ts +++ b/packages/vertexai/src/errors.ts @@ -16,30 +16,30 @@ */ import { FirebaseError } from '@firebase/util'; -import { GenAIErrorCode as GenAIErrorCode, CustomErrorData } from './types'; +import { AIErrorCode as AIErrorCode, CustomErrorData } from './types'; import { VERTEX_TYPE } from './constants'; /** - * Error class for the Vertex AI in Firebase SDK. + * Error class for the Firebase AI SDK. * * @public */ -export class GenAIError extends FirebaseError { +export class AIError extends FirebaseError { /** - * Constructs a new instance of the `GenAIError` class. + * Constructs a new instance of the `AIError` class. * - * @param code - The error code from {@link GenAIErrorCode}. + * @param code - The error code from {@link AIErrorCode}. * @param message - A human-readable message describing the error. * @param customErrorData - Optional error data. */ constructor( - readonly code: GenAIErrorCode, + readonly code: AIErrorCode, message: string, readonly customErrorData?: CustomErrorData ) { // Match error format used by FirebaseError from ErrorFactory - const service = VERTEX_TYPE; // TODO (v12): Rename to GENAI_TYPE - const serviceName = 'VertexAI'; // TODO (v12): Rename to GenAI on breaking release. + const service = VERTEX_TYPE; // TODO (v12): Rename to GENAI_TYPE on breaking release. + const serviceName = 'VertexAI'; // TODO (v12): Rename to AI on breaking release. const fullCode = `${service}/${code}`; const fullMessage = `${serviceName}: ${message} (${fullCode})`; super(code, fullMessage); @@ -51,14 +51,14 @@ export class GenAIError extends FirebaseError { if (Error.captureStackTrace) { // Allows us to initialize the stack trace without including the constructor itself at the // top level of the stack trace. - Error.captureStackTrace(this, GenAIError); + Error.captureStackTrace(this, AIError); } - // Allows instanceof GenAIError in ES5/ES6 + // Allows instanceof AIError in ES5/ES6 // https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work // TODO(dlarocque): Replace this with `new.target`: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-2.html#support-for-newtarget // which we can now use since we no longer target ES5. - Object.setPrototypeOf(this, GenAIError.prototype); + Object.setPrototypeOf(this, AIError.prototype); // Since Error is an interface, we don't inherit toString and so we define it ourselves. 
this.toString = () => fullMessage; diff --git a/packages/vertexai/src/googleAIMappers.test.ts b/packages/vertexai/src/googleAIMappers.test.ts index 3952cce2544..6cf7425a293 100644 --- a/packages/vertexai/src/googleAIMappers.test.ts +++ b/packages/vertexai/src/googleAIMappers.test.ts @@ -36,7 +36,7 @@ import { HarmProbability, HarmSeverity, SafetyRating, - GenAIErrorCode, + AIErrorCode, FinishReason, PromptFeedback } from './types'; @@ -46,7 +46,7 @@ import { GoogleAICountTokensRequest } from './types/googleAI'; import { logger } from './logger'; -import { GenAIError } from './errors'; +import { AIError } from './errors'; import { getMockResponse } from '../test-utils/mock-response'; use(sinonChai); @@ -79,8 +79,8 @@ describe('Google AI Mappers', () => { ] }; expect(() => mapGenerateContentRequest(request)) - .to.throw(GenAIError, /SafetySetting.method is not supported/i) - .with.property('code', GenAIErrorCode.UNSUPPORTED); + .to.throw(AIError, /SafetySetting.method is not supported/i) + .with.property('code', AIErrorCode.UNSUPPORTED); }); it('should warn and round topK if present', () => { @@ -334,8 +334,8 @@ describe('Google AI Mappers', () => { } ]; expect(() => mapGenerateContentCandidates(candidates)) - .to.throw(GenAIError, /Part.videoMetadata is not supported/i) - .with.property('code', GenAIErrorCode.UNSUPPORTED); + .to.throw(AIError, /Part.videoMetadata is not supported/i) + .with.property('code', AIErrorCode.UNSUPPORTED); }); it('should handle candidates without citation or safety ratings', () => { diff --git a/packages/vertexai/src/googleAIMappers.ts b/packages/vertexai/src/googleAIMappers.ts index e61a3532be1..af278bef782 100644 --- a/packages/vertexai/src/googleAIMappers.ts +++ b/packages/vertexai/src/googleAIMappers.ts @@ -15,7 +15,7 @@ * limitations under the License. */ -import { GenAIError } from './errors'; +import { AIError } from './errors'; import { logger } from './logger'; import { CitationMetadata, @@ -27,7 +27,7 @@ import { InlineDataPart, PromptFeedback, SafetyRating, - GenAIErrorCode + AIErrorCode } from './types'; import { GoogleAIGenerateContentResponse, @@ -62,8 +62,8 @@ export function mapGenerateContentRequest( ): GenerateContentRequest { generateContentRequest.safetySettings?.forEach(safetySetting => { if (safetySetting.method) { - throw new GenAIError( - GenAIErrorCode.UNSUPPORTED, + throw new AIError( + AIErrorCode.UNSUPPORTED, 'SafetySetting.method is not supported in the Google AI. Please remove this property.' ); } @@ -185,8 +185,8 @@ export function mapGenerateContentCandidates( part => (part as InlineDataPart)?.videoMetadata ) ) { - throw new GenAIError( - GenAIErrorCode.UNSUPPORTED, + throw new AIError( + AIErrorCode.UNSUPPORTED, 'Part.videoMetadata is not supported in Google AI. Please remove this property.' ); } diff --git a/packages/vertexai/src/helpers.test.ts b/packages/vertexai/src/helpers.test.ts index f7316e3f119..7ed8f6a754a 100644 --- a/packages/vertexai/src/helpers.test.ts +++ b/packages/vertexai/src/helpers.test.ts @@ -15,12 +15,12 @@ * limitations under the License. 
*/ import { expect } from 'chai'; -import { GENAI_TYPE } from './constants'; +import { AI_TYPE } from './constants'; import { encodeInstanceIdentifier, decodeInstanceIdentifier } from './helpers'; -import { GenAIError } from './errors'; +import { AIError } from './errors'; import { BackendType } from './public-types'; import { InstanceIdentifier } from './types/internal'; -import { GenAIErrorCode } from './types'; +import { AIErrorCode } from './types'; describe('Identifier Encoding/Decoding', () => { describe('encodeInstanceIdentifier', () => { @@ -30,7 +30,7 @@ describe('Identifier Encoding/Decoding', () => { location: 'us-central1' }; console.log(identifier); - const expected = `${GENAI_TYPE}/vertexai/us-central1`; + const expected = `${AI_TYPE}/vertexai/us-central1`; expect(encodeInstanceIdentifier(identifier)).to.equal(expected); }); @@ -39,7 +39,7 @@ describe('Identifier Encoding/Decoding', () => { backendType: BackendType.VERTEX_AI, location: '' }; - const expected = `${GENAI_TYPE}/vertexai/`; + const expected = `${AI_TYPE}/vertexai/`; expect(encodeInstanceIdentifier(identifier)).to.equal(expected); }); @@ -47,32 +47,32 @@ describe('Identifier Encoding/Decoding', () => { const identifier: InstanceIdentifier = { backendType: BackendType.GOOGLE_AI }; - const expected = `${GENAI_TYPE}/googleai`; + const expected = `${AI_TYPE}/googleai`; expect(encodeInstanceIdentifier(identifier)).to.equal(expected); }); - it('should throw GenAIError for unknown backend type', () => { + it('should throw AIError for unknown backend type', () => { const identifier = { backendType: 'some-future-backend' } as any; // bypass type checking for the test - expect(() => encodeInstanceIdentifier(identifier)).to.throw(GenAIError); + expect(() => encodeInstanceIdentifier(identifier)).to.throw(AIError); try { encodeInstanceIdentifier(identifier); expect.fail('Expected encodeInstanceIdentifier to throw'); } catch (e) { - expect(e).to.be.instanceOf(GenAIError); - const error = e as GenAIError; + expect(e).to.be.instanceOf(AIError); + const error = e as AIError; expect(error.message).to.contain(`Unknown backend`); - expect(error.code).to.equal(GenAIErrorCode.ERROR); + expect(error.code).to.equal(AIErrorCode.ERROR); } }); }); describe('decodeInstanceIdentifier', () => { it('should decode Vertex AI identifier with location', () => { - const encoded = `${GENAI_TYPE}/vertexai/europe-west1`; + const encoded = `${AI_TYPE}/vertexai/europe-west1`; const expected: InstanceIdentifier = { backendType: BackendType.VERTEX_AI, location: 'europe-west1' @@ -81,58 +81,58 @@ describe('Identifier Encoding/Decoding', () => { }); it('should throw an error if Vertex AI identifier string without explicit location part', () => { - const encoded = `${GENAI_TYPE}/vertexai`; - expect(() => decodeInstanceIdentifier(encoded)).to.throw(GenAIError); + const encoded = `${AI_TYPE}/vertexai`; + expect(() => decodeInstanceIdentifier(encoded)).to.throw(AIError); try { decodeInstanceIdentifier(encoded); expect.fail('Expected encodeInstanceIdentifier to throw'); } catch (e) { - expect(e).to.be.instanceOf(GenAIError); - const error = e as GenAIError; + expect(e).to.be.instanceOf(AIError); + const error = e as AIError; expect(error.message).to.contain( `Invalid instance identifier, unknown location` ); - expect(error.code).to.equal(GenAIErrorCode.ERROR); + expect(error.code).to.equal(AIErrorCode.ERROR); } }); it('should decode Google AI identifier', () => { - const encoded = `${GENAI_TYPE}/googleai`; + const encoded = `${AI_TYPE}/googleai`; const expected: 
InstanceIdentifier = { backendType: BackendType.GOOGLE_AI }; expect(decodeInstanceIdentifier(encoded)).to.deep.equal(expected); }); - it('should throw GenAIError for invalid backend string', () => { - const encoded = `${GENAI_TYPE}/someotherbackend/location`; + it('should throw AIError for invalid backend string', () => { + const encoded = `${AI_TYPE}/someotherbackend/location`; expect(() => decodeInstanceIdentifier(encoded)).to.throw( - GenAIError, + AIError, `Invalid instance identifier string: '${encoded}'` ); try { decodeInstanceIdentifier(encoded); expect.fail('Expected decodeInstanceIdentifier to throw'); } catch (e) { - expect(e).to.be.instanceOf(GenAIError); - expect((e as GenAIError).code).to.equal(GenAIErrorCode.ERROR); + expect(e).to.be.instanceOf(AIError); + expect((e as AIError).code).to.equal(AIErrorCode.ERROR); } }); - it('should throw GenAIError for malformed identifier string (too few parts)', () => { - const encoded = GENAI_TYPE; + it('should throw AIError for malformed identifier string (too few parts)', () => { + const encoded = AI_TYPE; expect(() => decodeInstanceIdentifier(encoded)).to.throw( - GenAIError, + AIError, `Invalid instance identifier string: '${encoded}'` ); }); - it('should throw GenAIError for malformed identifier string (incorrect prefix)', () => { + it('should throw AIError for malformed identifier string (incorrect prefix)', () => { const encoded = 'firebase/vertexai/location'; // This will also hit the default case in the switch statement expect(() => decodeInstanceIdentifier(encoded)).to.throw( - GenAIError, + AIError, `Invalid instance identifier, unknown prefix 'firebase'` ); }); diff --git a/packages/vertexai/src/helpers.ts b/packages/vertexai/src/helpers.ts index 28f11a4b2bd..764d06fe9f7 100644 --- a/packages/vertexai/src/helpers.ts +++ b/packages/vertexai/src/helpers.ts @@ -15,16 +15,16 @@ * limitations under the License. */ -import { GENAI_TYPE } from './constants'; -import { GenAIError } from './errors'; +import { AI_TYPE } from './constants'; +import { AIError } from './errors'; import { BackendType } from './public-types'; import { InstanceIdentifier } from './types/internal'; -import { GenAIErrorCode } from './types'; +import { AIErrorCode } from './types'; /** * Encodes an {@link InstanceIdentifier} into a string. * - * This string is used to identify unique {@link GenAI} instances by backend type. + * This string is used to identify unique {@link AI} instances by backend type. 
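 * For example, a Vertex AI instance in `us-central1` encodes to `'AI/vertexai/us-central1'`,
 * and a Google AI instance encodes to `'AI/googleai'` (with `AI_TYPE` currently set to `'AI'`).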
* * @internal */ @@ -33,12 +33,12 @@ export function encodeInstanceIdentifier( ): string { switch (instanceIdentifier.backendType) { case BackendType.VERTEX_AI: - return `${GENAI_TYPE}/vertexai/${instanceIdentifier.location}`; + return `${AI_TYPE}/vertexai/${instanceIdentifier.location}`; case BackendType.GOOGLE_AI: - return `${GENAI_TYPE}/googleai`; + return `${AI_TYPE}/googleai`; default: - throw new GenAIError( - GenAIErrorCode.ERROR, + throw new AIError( + AIErrorCode.ERROR, `Unknown backend '${instanceIdentifier}'` ); } @@ -53,9 +53,9 @@ export function decodeInstanceIdentifier( instanceIdentifier: string ): InstanceIdentifier { const identifierParts = instanceIdentifier.split('/'); - if (identifierParts[0] !== GENAI_TYPE) { - throw new GenAIError( - GenAIErrorCode.ERROR, + if (identifierParts[0] !== AI_TYPE) { + throw new AIError( + AIErrorCode.ERROR, `Invalid instance identifier, unknown prefix '${identifierParts[0]}'` ); } @@ -64,8 +64,8 @@ export function decodeInstanceIdentifier( case 'vertexai': const location: string | undefined = identifierParts[2]; if (!location) { - throw new GenAIError( - GenAIErrorCode.ERROR, + throw new AIError( + AIErrorCode.ERROR, `Invalid instance identifier, unknown location '${instanceIdentifier}'` ); } @@ -78,8 +78,8 @@ export function decodeInstanceIdentifier( backendType: BackendType.GOOGLE_AI }; default: - throw new GenAIError( - GenAIErrorCode.ERROR, + throw new AIError( + AIErrorCode.ERROR, `Invalid instance identifier string: '${instanceIdentifier}'` ); } diff --git a/packages/vertexai/src/index.node.ts b/packages/vertexai/src/index.node.ts index 414aefdaffb..60ed127c19d 100644 --- a/packages/vertexai/src/index.node.ts +++ b/packages/vertexai/src/index.node.ts @@ -1,5 +1,5 @@ /** - * The Vertex AI in Firebase Web SDK. + * The Firebase AI Web SDK. * * @packageDocumentation */ @@ -22,17 +22,17 @@ */ import { registerVersion, _registerComponent } from '@firebase/app'; -import { GenAIService } from './service'; -import { DEFAULT_INSTANCE_IDENTIFER, GENAI_TYPE } from './constants'; +import { AIService } from './service'; +import { DEFAULT_INSTANCE_IDENTIFIER, AI_TYPE } from './constants'; import { Component, ComponentType } from '@firebase/component'; import { name, version } from '../package.json'; import { InstanceIdentifier } from './types/internal'; import { decodeInstanceIdentifier } from './helpers'; -function registerGenAI(): void { +function registerAI(): void { _registerComponent( new Component( - GENAI_TYPE, + AI_TYPE, (container, options) => { // getImmediate for FirebaseApp will always succeed const app = container.getProvider('app').getImmediate(); @@ -45,12 +45,12 @@ function registerGenAI(): void { options.instanceIdentifier ); } else { - instanceIdentifier = DEFAULT_INSTANCE_IDENTIFER; + instanceIdentifier = DEFAULT_INSTANCE_IDENTIFIER; } const backend = instanceIdentifier; - return new GenAIService(app, backend, auth, appCheckProvider); + return new AIService(app, backend, auth, appCheckProvider); }, ComponentType.PUBLIC ).setMultipleInstances(true) @@ -61,7 +61,7 @@ function registerGenAI(): void { registerVersion(name, version, '__BUILD_TARGET__'); } -registerGenAI(); +registerAI(); export * from './api'; export * from './public-types'; diff --git a/packages/vertexai/src/index.ts b/packages/vertexai/src/index.ts index 985469c5ce1..2f275e9d903 100644 --- a/packages/vertexai/src/index.ts +++ b/packages/vertexai/src/index.ts @@ -1,5 +1,5 @@ /** - * The Vertex AI in Firebase Web SDK. + * The Firebase AI Web SDK. 
* * @packageDocumentation */ @@ -22,13 +22,13 @@ */ import { registerVersion, _registerComponent } from '@firebase/app'; -import { GenAIService } from './service'; -import { GENAI_TYPE } from './constants'; +import { AIService } from './service'; +import { AI_TYPE } from './constants'; import { Component, ComponentType } from '@firebase/component'; import { name, version } from '../package.json'; import { decodeInstanceIdentifier } from './helpers'; -import { GenAIError } from './api'; -import { GenAIErrorCode } from './types'; +import { AIError } from './api'; +import { AIErrorCode } from './types'; declare global { interface Window { @@ -36,15 +36,15 @@ declare global { } } -function registerGenAI(): void { +function registerAI(): void { _registerComponent( new Component( - GENAI_TYPE, + AI_TYPE, (container, { instanceIdentifier }) => { if (!instanceIdentifier) { - throw new GenAIError( - GenAIErrorCode.ERROR, - 'GenAIService instance identifier is undefined.' + throw new AIError( + AIErrorCode.ERROR, + 'AIService instance identifier is undefined.' ); } @@ -53,7 +53,7 @@ function registerGenAI(): void { const app = container.getProvider('app').getImmediate(); const auth = container.getProvider('auth-internal'); const appCheckProvider = container.getProvider('app-check-internal'); - return new GenAIService(app, backend, auth, appCheckProvider); + return new AIService(app, backend, auth, appCheckProvider); }, ComponentType.PUBLIC ).setMultipleInstances(true) @@ -64,7 +64,7 @@ function registerGenAI(): void { registerVersion(name, version, '__BUILD_TARGET__'); } -registerGenAI(); +registerAI(); export * from './api'; export * from './public-types'; diff --git a/packages/vertexai/src/methods/chat-session-helpers.ts b/packages/vertexai/src/methods/chat-session-helpers.ts index a1a1713f584..ba462386e9b 100644 --- a/packages/vertexai/src/methods/chat-session-helpers.ts +++ b/packages/vertexai/src/methods/chat-session-helpers.ts @@ -15,8 +15,8 @@ * limitations under the License. */ -import { Content, POSSIBLE_ROLES, Part, Role, GenAIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { Content, POSSIBLE_ROLES, Part, Role, AIErrorCode } from '../types'; +import { AIError } from '../errors'; // https://ai.google.dev/api/rest/v1beta/Content#part @@ -48,14 +48,14 @@ export function validateChatHistory(history: Content[]): void { for (const currContent of history) { const { role, parts } = currContent; if (!prevContent && role !== 'user') { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `First Content should be with role 'user', got ${role}` ); } if (!POSSIBLE_ROLES.includes(role)) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `Each item should include role field. 
Got ${role} but valid roles are: ${JSON.stringify( POSSIBLE_ROLES )}` @@ -63,15 +63,15 @@ export function validateChatHistory(history: Content[]): void { } if (!Array.isArray(parts)) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `Content should have 'parts' but property with an array of Parts` ); } if (parts.length === 0) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `Each Content should have at least one part` ); } @@ -93,8 +93,8 @@ export function validateChatHistory(history: Content[]): void { const validParts = VALID_PARTS_PER_ROLE[role]; for (const key of VALID_PART_FIELDS) { if (!validParts.includes(key) && countFields[key] > 0) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `Content with role '${role}' can't contain '${key}' part` ); } @@ -103,8 +103,8 @@ export function validateChatHistory(history: Content[]): void { if (prevContent) { const validPreviousContentRoles = VALID_PREVIOUS_CONTENT_ROLES[role]; if (!validPreviousContentRoles.includes(prevContent.role)) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, `Content with role '${role} can't follow '${ prevContent.role }'. Valid previous roles: ${JSON.stringify( diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index 181d7d45b84..b5ef144f76f 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -23,7 +23,7 @@ import { getMockResponse } from '../../test-utils/mock-response'; import * as request from '../requests/request'; import { generateContent } from './generate-content'; import { - GenAIErrorCode, + AIErrorCode, GenerateContentRequest, HarmBlockMethod, HarmBlockThreshold, @@ -31,7 +31,7 @@ import { } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; -import { GenAIError, googleAIBackend, vertexAIBackend } from '../api'; +import { AIError, googleAIBackend, vertexAIBackend } from '../api'; import { mapGenerateContentRequest } from '../googleAIMappers'; use(sinonChai); @@ -341,14 +341,14 @@ describe('generateContent()', () => { ] }; - // Expect generateContent to throw a GenAIError that method is not supported. + // Expect generateContent to throw a AIError that method is not supported. await expect( generateContent( fakeGoogleAIApiSettings, 'model', requestParamsWithMethod ) - ).to.be.rejectedWith(GenAIError, GenAIErrorCode.UNSUPPORTED); + ).to.be.rejectedWith(AIError, AIErrorCode.UNSUPPORTED); expect(makeRequestStub).to.not.be.called; }); it('maps request to GoogleAI format', async () => { diff --git a/packages/vertexai/src/models/genai-model.test.ts b/packages/vertexai/src/models/genai-model.test.ts index 16bd54d9f6f..d8db0bea2ec 100644 --- a/packages/vertexai/src/models/genai-model.test.ts +++ b/packages/vertexai/src/models/genai-model.test.ts @@ -15,25 +15,25 @@ * limitations under the License. 
*/ import { use, expect } from 'chai'; -import { GenAI, GenAIErrorCode } from '../public-types'; +import { AI, AIErrorCode } from '../public-types'; import sinonChai from 'sinon-chai'; -import { GenAIModel } from './genai-model'; -import { GenAIError } from '../errors'; +import { AIModel } from './genai-model'; +import { AIError } from '../errors'; import { vertexAIBackend } from '../api'; use(sinonChai); /** - * A class that extends GenAIModel that allows us to test the protected constructor. + * A class that extends AIModel that allows us to test the protected constructor. */ -class TestModel extends GenAIModel { +class TestModel extends AIModel { /* eslint-disable @typescript-eslint/no-useless-constructor */ - constructor(genAI: GenAI, modelName: string) { - super(genAI, modelName); + constructor(ai: AI, modelName: string) { + super(ai, modelName); } } -const fakeGenAI: GenAI = { +const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -47,28 +47,28 @@ const fakeGenAI: GenAI = { location: 'us-central1' }; -describe('GenAIModel', () => { +describe('AIModel', () => { it('handles plain model name', () => { - const testModel = new TestModel(fakeGenAI, 'my-model'); + const testModel = new TestModel(fakeAI, 'my-model'); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles models/ prefixed model name', () => { - const testModel = new TestModel(fakeGenAI, 'models/my-model'); + const testModel = new TestModel(fakeAI, 'models/my-model'); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles full model name', () => { const testModel = new TestModel( - fakeGenAI, + fakeAI, 'publishers/google/models/my-model' ); expect(testModel.model).to.equal('publishers/google/models/my-model'); }); it('handles prefixed tuned model name', () => { - const testModel = new TestModel(fakeGenAI, 'tunedModels/my-model'); + const testModel = new TestModel(fakeAI, 'tunedModels/my-model'); expect(testModel.model).to.equal('tunedModels/my-model'); }); it('throws if not passed an api key', () => { - const fakeGenAI: GenAI = { + const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -80,13 +80,13 @@ describe('GenAIModel', () => { location: 'us-central1' }; try { - new TestModel(fakeGenAI, 'my-model'); + new TestModel(fakeAI, 'my-model'); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_API_KEY); + expect((e as AIError).code).to.equal(AIErrorCode.NO_API_KEY); } }); it('throws if not passed a project ID', () => { - const fakeGenAI: GenAI = { + const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -98,13 +98,13 @@ describe('GenAIModel', () => { location: 'us-central1' }; try { - new TestModel(fakeGenAI, 'my-model'); + new TestModel(fakeAI, 'my-model'); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_PROJECT_ID); + expect((e as AIError).code).to.equal(AIErrorCode.NO_PROJECT_ID); } }); it('throws if not passed an app ID', () => { - const fakeGenAI: GenAI = { + const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -117,9 +117,9 @@ describe('GenAIModel', () => { location: 'us-central1' }; try { - new TestModel(fakeGenAI, 'my-model'); + new TestModel(fakeAI, 'my-model'); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.NO_APP_ID); + expect((e as AIError).code).to.equal(AIErrorCode.NO_APP_ID); } }); }); diff --git a/packages/vertexai/src/models/genai-model.ts 
b/packages/vertexai/src/models/genai-model.ts index 28e67c8552d..dd3b666d555 100644 --- a/packages/vertexai/src/models/genai-model.ts +++ b/packages/vertexai/src/models/genai-model.ts @@ -15,18 +15,18 @@ * limitations under the License. */ -import { GenAIError } from '../errors'; -import { GenAIErrorCode, GenAI, BackendType } from '../public-types'; -import { GenAIService } from '../service'; +import { AIError } from '../errors'; +import { AIErrorCode, AI, BackendType } from '../public-types'; +import { AIService } from '../service'; import { ApiSettings } from '../types/internal'; import { _isFirebaseServerApp } from '@firebase/app'; /** - * Base class for Vertex AI in Firebase model APIs. + * Base class for Firebase AI model APIs. * * @public */ -export abstract class GenAIModel { +export abstract class AIModel { /** * The fully qualified model resource name to use for generating images * (for example, `publishers/google/models/imagen-3.0-generate-002`). @@ -39,12 +39,12 @@ export abstract class GenAIModel { protected _apiSettings: ApiSettings; /** - * Constructs a new instance of the {@link GenAIModel} class. + * Constructs a new instance of the {@link AIModel} class. * * This constructor should only be called from subclasses that provide * a model API. * - * @param genAI - A {@link GenAI} instance. + * @param ai - an {@link AI} instance. * @param modelName - The name of the model being used. It can be in one of the following formats: * - `my-model` (short name, will resolve to `publishers/google/models/my-model`) * - `models/my-model` (will resolve to `publishers/google/models/my-model`) @@ -55,49 +55,49 @@ export abstract class GenAIModel { * * @internal */ - protected constructor(genAI: GenAI, modelName: string) { - if (!genAI.app?.options?.apiKey) { - throw new GenAIError( - GenAIErrorCode.NO_API_KEY, - `The "apiKey" field is empty in the local Firebase config. Firebase VertexAI requires this field to contain a valid API key.` + protected constructor(ai: AI, modelName: string) { + if (!ai.app?.options?.apiKey) { + throw new AIError( + AIErrorCode.NO_API_KEY, + `The "apiKey" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid API key.` ); - } else if (!genAI.app?.options?.projectId) { - throw new GenAIError( - GenAIErrorCode.NO_PROJECT_ID, - `The "projectId" field is empty in the local Firebase config. Firebase VertexAI requires this field to contain a valid project ID.` + } else if (!ai.app?.options?.projectId) { + throw new AIError( + AIErrorCode.NO_PROJECT_ID, + `The "projectId" field is empty in the local Firebase config. Firebase AI requires this field to contain a valid project ID.` ); - } else if (!genAI.app?.options?.appId) { - throw new GenAIError( - GenAIErrorCode.NO_APP_ID, - `The "appId" field is empty in the local Firebase config. Firebase VertexAI requires this field to contain a valid app ID.` + } else if (!ai.app?.options?.appId) { + throw new AIError( + AIErrorCode.NO_APP_ID, + `The "appId" field is empty in the local Firebase config. 
Firebase AI requires this field to contain a valid app ID.` ); } else { this._apiSettings = { - apiKey: genAI.app.options.apiKey, - project: genAI.app.options.projectId, - appId: genAI.app.options.appId, + apiKey: ai.app.options.apiKey, + project: ai.app.options.projectId, + appId: ai.app.options.appId, automaticDataCollectionEnabled: - genAI.app.automaticDataCollectionEnabled, - location: genAI.location, - backend: genAI.backend + ai.app.automaticDataCollectionEnabled, + location: ai.location, + backend: ai.backend }; - if (_isFirebaseServerApp(genAI.app) && genAI.app.settings.appCheckToken) { - const token = genAI.app.settings.appCheckToken; + if (_isFirebaseServerApp(ai.app) && ai.app.settings.appCheckToken) { + const token = ai.app.settings.appCheckToken; this._apiSettings.getAppCheckToken = () => { return Promise.resolve({ token }); }; - } else if ((genAI as GenAIService).appCheck) { + } else if ((ai as AIService).appCheck) { this._apiSettings.getAppCheckToken = () => - (genAI as GenAIService).appCheck!.getToken(); + (ai as AIService).appCheck!.getToken(); } - if ((genAI as GenAIService).auth) { + if ((ai as AIService).auth) { this._apiSettings.getAuthToken = () => - (genAI as GenAIService).auth!.getToken(); + (ai as AIService).auth!.getToken(); } - this.model = GenAIModel.normalizeModelName( + this.model = AIModel.normalizeModelName( modelName, this._apiSettings.backend.backendType ); @@ -117,9 +117,9 @@ export abstract class GenAIModel { backendType: BackendType ): string { if (backendType === BackendType.GOOGLE_AI) { - return GenAIModel.normalizeGoogleAIModelName(modelName); + return AIModel.normalizeGoogleAIModelName(modelName); } else { - return GenAIModel.normalizeVertexAIModelName(modelName); + return AIModel.normalizeVertexAIModelName(modelName); } } diff --git a/packages/vertexai/src/models/generative-model.test.ts b/packages/vertexai/src/models/generative-model.test.ts index a8245fe16ef..7f61070a52a 100644 --- a/packages/vertexai/src/models/generative-model.test.ts +++ b/packages/vertexai/src/models/generative-model.test.ts @@ -16,7 +16,7 @@ */ import { use, expect } from 'chai'; import { GenerativeModel } from './generative-model'; -import { FunctionCallingMode, GenAI } from '../public-types'; +import { FunctionCallingMode, AI } from '../public-types'; import * as request from '../requests/request'; import { match, restore, stub } from 'sinon'; import { getMockResponse } from '../../test-utils/mock-response'; @@ -25,7 +25,7 @@ import { vertexAIBackend } from '../api'; use(sinonChai); -const fakeGenAI: GenAI = { +const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -41,7 +41,7 @@ const fakeGenAI: GenAI = { describe('GenerativeModel', () => { it('passes params through to generateContent', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', tools: [ { @@ -86,7 +86,7 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to generateContent', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', systemInstruction: 'be friendly' }); @@ -112,7 +112,7 @@ describe('GenerativeModel', () => { restore(); }); it('generateContent overrides model values', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', tools: [ { @@ -168,7 +168,7 @@ describe('GenerativeModel', () => { 
restore(); }); it('passes params through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', tools: [ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } @@ -206,7 +206,7 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', systemInstruction: 'be friendly' }); @@ -232,7 +232,7 @@ describe('GenerativeModel', () => { restore(); }); it('startChat overrides model values', async () => { - const genModel = new GenerativeModel(fakeGenAI, { + const genModel = new GenerativeModel(fakeAI, { model: 'my-model', tools: [ { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } @@ -284,7 +284,7 @@ describe('GenerativeModel', () => { restore(); }); it('calls countTokens', async () => { - const genModel = new GenerativeModel(fakeGenAI, { model: 'my-model' }); + const genModel = new GenerativeModel(fakeAI, { model: 'my-model' }); const mockResponse = getMockResponse( 'vertexAI', 'unary-success-total-tokens.json' diff --git a/packages/vertexai/src/models/generative-model.ts b/packages/vertexai/src/models/generative-model.ts index 37880a794b7..2e7ed93eeb8 100644 --- a/packages/vertexai/src/models/generative-model.ts +++ b/packages/vertexai/src/models/generative-model.ts @@ -41,14 +41,14 @@ import { formatGenerateContentInput, formatSystemInstruction } from '../requests/request-helpers'; -import { GenAI } from '../public-types'; -import { GenAIModel } from './genai-model'; +import { AI } from '../public-types'; +import { AIModel } from './genai-model'; /** * Class for generative model APIs. 
* @public */ -export class GenerativeModel extends GenAIModel { +export class GenerativeModel extends AIModel { generationConfig: GenerationConfig; safetySettings: SafetySetting[]; requestOptions?: RequestOptions; @@ -57,11 +57,11 @@ export class GenerativeModel extends GenAIModel { systemInstruction?: Content; constructor( - genAI: GenAI, + ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions ) { - super(genAI, modelParams.model); + super(ai, modelParams.model); this.generationConfig = modelParams.generationConfig || {}; this.safetySettings = modelParams.safetySettings || []; this.tools = modelParams.tools; diff --git a/packages/vertexai/src/models/imagen-model.test.ts b/packages/vertexai/src/models/imagen-model.test.ts index e2a426a2416..f7a945d212d 100644 --- a/packages/vertexai/src/models/imagen-model.test.ts +++ b/packages/vertexai/src/models/imagen-model.test.ts @@ -20,19 +20,19 @@ import { ImagenAspectRatio, ImagenPersonFilterLevel, ImagenSafetyFilterLevel, - GenAI, - GenAIErrorCode + AI, + AIErrorCode } from '../public-types'; import * as request from '../requests/request'; import sinonChai from 'sinon-chai'; -import { GenAIError } from '../errors'; +import { AIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; import { match, restore, stub } from 'sinon'; import { vertexAIBackend } from '../api'; use(sinonChai); -const fakeGenAI: GenAI = { +const fakeAI: AI = { app: { name: 'DEFAULT', automaticDataCollectionEnabled: true, @@ -56,7 +56,7 @@ describe('ImagenModel', () => { mockResponse as Response ); - const imagenModel = new ImagenModel(fakeGenAI, { + const imagenModel = new ImagenModel(fakeAI, { model: 'my-model' }); const prompt = 'A photorealistic image of a toy boat at sea.'; @@ -77,7 +77,7 @@ describe('ImagenModel', () => { restore(); }); it('generateImages makes a request to predict with generation config and safety settings', async () => { - const imagenModel = new ImagenModel(fakeGenAI, { + const imagenModel = new ImagenModel(fakeAI, { model: 'my-model', generationConfig: { negativePrompt: 'do not hallucinate', @@ -148,15 +148,15 @@ describe('ImagenModel', () => { json: mockResponse.json } as Response); - const imagenModel = new ImagenModel(fakeGenAI, { + const imagenModel = new ImagenModel(fakeAI, { model: 'my-model' }); try { await imagenModel.generateImages('some inappropriate prompt.'); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); - expect((e as GenAIError).message).to.include('400'); - expect((e as GenAIError).message).to.include( + expect((e as AIError).code).to.equal(AIErrorCode.FETCH_ERROR); + expect((e as AIError).message).to.include('400'); + expect((e as AIError).message).to.include( "Image generation failed with the following error: The prompt could not be submitted. This prompt contains sensitive words that violate Google's Responsible AI practices. Try rephrasing the prompt. If you think this was an error, send feedback." ); } finally { diff --git a/packages/vertexai/src/models/imagen-model.ts b/packages/vertexai/src/models/imagen-model.ts index 9a36b3f6954..070b1d064ec 100644 --- a/packages/vertexai/src/models/imagen-model.ts +++ b/packages/vertexai/src/models/imagen-model.ts @@ -15,7 +15,7 @@ * limitations under the License. 
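As a usage sketch for the renamed model classes: callers normally reach `GenerativeModel` through `getGenerativeModel()` rather than the constructor. `getAI`, `vertexAIBackend`, and `getGenerativeModel` all appear elsewhere in this patch; the import path, model name, and prompt below are placeholders.

```ts
import { getApp } from '@firebase/app';
// At this point in the series the backend is still created with the
// vertexAIBackend() factory; a later commit converts it to a class.
import { getAI, getGenerativeModel, vertexAIBackend } from './api'; // in-package path, for illustration

async function run(): Promise<void> {
  const ai = getAI(getApp(), { backend: vertexAIBackend('us-central1') });

  // getGenerativeModel() forwards `ai` and the model params to the
  // GenerativeModel constructor shown above.
  const model = getGenerativeModel(ai, {
    model: 'my-model', // placeholder
    systemInstruction: 'be friendly'
  });

  const result = await model.generateContent('Hello!');
  console.log(result.response.text());
}
```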
*/ -import { GenAI } from '../public-types'; +import { AI } from '../public-types'; import { Task, makeRequest } from '../requests/request'; import { createPredictRequestBody } from '../requests/request-helpers'; import { handlePredictResponse } from '../requests/response-helpers'; @@ -28,7 +28,7 @@ import { ImagenGenerationResponse, ImagenSafetySettings } from '../types'; -import { GenAIModel } from './genai-model'; +import { AIModel } from './genai-model'; /** * Class for Imagen model APIs. @@ -38,7 +38,7 @@ import { GenAIModel } from './genai-model'; * @example * ```javascript * const imagen = new ImagenModel( - * genAI, + * ai, * { * model: 'imagen-3.0-generate-002' * } @@ -52,7 +52,7 @@ import { GenAIModel } from './genai-model'; * * @beta */ -export class ImagenModel extends GenAIModel { +export class ImagenModel extends AIModel { /** * The Imagen generation configuration. */ @@ -65,7 +65,7 @@ export class ImagenModel extends GenAIModel { /** * Constructs a new instance of the {@link ImagenModel} class. * - * @param genAI - A {@link GenAI} instance. + * @param ai - an {@link AI} instance. * @param modelParams - Parameters to use when making requests to Imagen. * @param requestOptions - Additional options to use when making requests. * @@ -73,12 +73,12 @@ export class ImagenModel extends GenAIModel { * Firebase config. */ constructor( - genAI: GenAI, + ai: AI, modelParams: ImagenModelParams, public requestOptions?: RequestOptions ) { const { model, generationConfig, safetySettings } = modelParams; - super(genAI, model); + super(ai, model); this.generationConfig = generationConfig; this.safetySettings = safetySettings; } diff --git a/packages/vertexai/src/public-types.ts b/packages/vertexai/src/public-types.ts index 4dd7b3a53ba..3a9e62a5fa5 100644 --- a/packages/vertexai/src/public-types.ts +++ b/packages/vertexai/src/public-types.ts @@ -20,16 +20,16 @@ import { FirebaseApp } from '@firebase/app'; export * from './types'; /** - * An instance of the Vertex AI in Firebase SDK. + * An instance of the Firebase AI SDK. * - * For more information, refer to the documentation for the new {@link GenAI}. + * For more information, refer to the documentation for the new {@link AI}. * * @public */ -export type VertexAI = GenAI; +export type VertexAI = AI; /** - * Options when initializing the Vertex AI in Firebase SDK. + * Options when initializing the Firebase AI SDK. * * @public */ @@ -38,15 +38,15 @@ export interface VertexAIOptions { } /** - * An instance of the Firebase GenAI SDK. + * An instance of the Firebase AI SDK. * - * Do not create this instance directly. Instead, use {@link getGenAI | getGenAI()}. + * Do not create this instance directly. Instead, use {@link getAI | getAI()}. * * @public */ -export interface GenAI { +export interface AI { /** - * The {@link @firebase/app#FirebaseApp} this {@link GenAI} instance is associated with. + * The {@link @firebase/app#FirebaseApp} this {@link AI} instance is associated with. */ app: FirebaseApp; /** @@ -54,15 +54,15 @@ export interface GenAI { */ backend: Backend; /** - * The location configured for this GenAI service instance, relevant for Vertex AI backends. + * The location configured for this AI service instance, relevant for Vertex AI backends. * - * @deprecated use `GenAI.backend.location` instead. + * @deprecated use `AI.backend.location` instead. */ location: string; } /** - * Union type representing the backend configuration for the GenAI service. + * Union type representing the backend configuration for the AI service. 
* This can be either a {@link GoogleAIBackend} or a * {@link VertexAIBackend} configuration object. * @@ -75,8 +75,8 @@ export type Backend = GoogleAIBackend | VertexAIBackend; /** * Represents the configuration object for the Google AI backend. - * Use this with {@link GenAIOptions} when initializing the service with - * {@link getGenAI | getGenAI()}. + * Use this with {@link AIOptions} when initializing the service with + * {@link getAI | getAI()}. * Create an instance using {@link googleAIBackend | googleAIBackend()}. * * @public @@ -90,8 +90,8 @@ export type GoogleAIBackend = { /** * Represents the configuration object for the Vertex AI backend. - * Use this with {@link GenAIOptions} when initializing the server with - * {@link getGenAI | getGenAI() }. + * Use this with {@link AIOptions} when initializing the server with + * {@link getAI | getAI() }. * Create an instance using {@link vertexAIBackend | vertexAIBackend() } function. * * @public @@ -110,7 +110,7 @@ export type VertexAIBackend = { /** * An enum-like object containing constants that represent the supported backends - * for the Firebase GenAI SDK. + * for the Firebase AI SDK. * * These values are assigned to the `backendType` property within the specific backend * configuration objects ({@link GoogleAIBackend} or {@link VertexAIBackend}) to identify @@ -142,13 +142,13 @@ export const BackendType = { export type BackendType = (typeof BackendType)[keyof typeof BackendType]; /** - * Options interface for initializing the GenAI service using {@link getGenAI | getGenAI()}. + * Options interface for initializing the AI service using {@link getAI | getAI()}. * * @public */ -export interface GenAIOptions { +export interface AIOptions { /** - * The backend configuration to use for the GenAI service instance. + * The backend configuration to use for the AI service instance. * Use {@link googleAIBackend | googleAIBackend()} or * {@link vertexAIBackend | vertexAIBackend() } to create this configuration. */ diff --git a/packages/vertexai/src/requests/request-helpers.ts b/packages/vertexai/src/requests/request-helpers.ts index 411e61c991b..c4dd514f8a8 100644 --- a/packages/vertexai/src/requests/request-helpers.ts +++ b/packages/vertexai/src/requests/request-helpers.ts @@ -19,9 +19,9 @@ import { Content, GenerateContentRequest, Part, - GenAIErrorCode + AIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { AIError } from '../errors'; import { ImagenGenerationParams, PredictRequestBody } from '../types/internal'; export function formatSystemInstruction( @@ -87,15 +87,15 @@ function assignRoleToPartsAndValidateSendMessageRequest( } if (hasUserContent && hasFunctionContent) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, 'Within a single message, FunctionResponse cannot be mixed with other type of Part in the request for sending chat message.' ); } if (!hasUserContent && !hasFunctionContent) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, 'No Content is provided for sending chat message.' 
); } diff --git a/packages/vertexai/src/requests/request.test.ts b/packages/vertexai/src/requests/request.test.ts index 62c0a230944..2efe72bdc60 100644 --- a/packages/vertexai/src/requests/request.test.ts +++ b/packages/vertexai/src/requests/request.test.ts @@ -22,8 +22,8 @@ import chaiAsPromised from 'chai-as-promised'; import { RequestUrl, Task, getHeaders, makeRequest } from './request'; import { ApiSettings } from '../types/internal'; import { DEFAULT_API_VERSION } from '../constants'; -import { GenAIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { AIErrorCode } from '../types'; +import { AIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; import { vertexAIBackend } from '../api'; @@ -310,12 +310,12 @@ describe('request methods', () => { } ); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); - expect((e as GenAIError).customErrorData?.status).to.equal(500); - expect((e as GenAIError).customErrorData?.statusText).to.equal( + expect((e as AIError).code).to.equal(AIErrorCode.FETCH_ERROR); + expect((e as AIError).customErrorData?.status).to.equal(500); + expect((e as AIError).customErrorData?.statusText).to.equal( 'AbortError' ); - expect((e as GenAIError).message).to.include('500 AbortError'); + expect((e as AIError).message).to.include('500 AbortError'); } expect(fetchStub).to.be.calledOnce; @@ -335,12 +335,12 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); - expect((e as GenAIError).customErrorData?.status).to.equal(500); - expect((e as GenAIError).customErrorData?.statusText).to.equal( + expect((e as AIError).code).to.equal(AIErrorCode.FETCH_ERROR); + expect((e as AIError).customErrorData?.status).to.equal(500); + expect((e as AIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as GenAIError).message).to.include('500 Server Error'); + expect((e as AIError).message).to.include('500 Server Error'); } expect(fetchStub).to.be.calledOnce; }); @@ -360,13 +360,13 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); - expect((e as GenAIError).customErrorData?.status).to.equal(500); - expect((e as GenAIError).customErrorData?.statusText).to.equal( + expect((e as AIError).code).to.equal(AIErrorCode.FETCH_ERROR); + expect((e as AIError).customErrorData?.status).to.equal(500); + expect((e as AIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as GenAIError).message).to.include('500 Server Error'); - expect((e as GenAIError).message).to.include('extra info'); + expect((e as AIError).message).to.include('500 Server Error'); + expect((e as AIError).message).to.include('extra info'); } expect(fetchStub).to.be.calledOnce; }); @@ -398,14 +398,14 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.FETCH_ERROR); - expect((e as GenAIError).customErrorData?.status).to.equal(500); - expect((e as GenAIError).customErrorData?.statusText).to.equal( + expect((e as AIError).code).to.equal(AIErrorCode.FETCH_ERROR); + expect((e as AIError).customErrorData?.status).to.equal(500); + expect((e as AIError).customErrorData?.statusText).to.equal( 'Server Error' ); - expect((e as GenAIError).message).to.include('500 Server Error'); - expect((e as GenAIError).message).to.include('extra info'); - expect((e as GenAIError).message).to.include( + 
expect((e as AIError).message).to.include('500 Server Error'); + expect((e as AIError).message).to.include('extra info'); + expect((e as AIError).message).to.include( 'generic::invalid_argument' ); } @@ -429,9 +429,9 @@ describe('request methods', () => { '' ); } catch (e) { - expect((e as GenAIError).code).to.equal(GenAIErrorCode.API_NOT_ENABLED); - expect((e as GenAIError).message).to.include('my-project'); - expect((e as GenAIError).message).to.include('googleapis.com'); + expect((e as AIError).code).to.equal(AIErrorCode.API_NOT_ENABLED); + expect((e as AIError).message).to.include('my-project'); + expect((e as AIError).message).to.include('googleapis.com'); } expect(fetchStub).to.be.calledOnce; }); diff --git a/packages/vertexai/src/requests/request.ts b/packages/vertexai/src/requests/request.ts index 64e299fc422..7b75d46a899 100644 --- a/packages/vertexai/src/requests/request.ts +++ b/packages/vertexai/src/requests/request.ts @@ -15,8 +15,8 @@ * limitations under the License. */ -import { ErrorDetails, RequestOptions, GenAIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { ErrorDetails, RequestOptions, AIErrorCode } from '../types'; +import { AIError } from '../errors'; import { ApiSettings } from '../types/internal'; import { DEFAULT_API_VERSION, @@ -191,9 +191,9 @@ export async function makeRequest( ) ) ) { - throw new GenAIError( - GenAIErrorCode.API_NOT_ENABLED, - `The Vertex AI in Firebase SDK requires the Vertex AI in Firebase ` + + throw new AIError( + AIErrorCode.API_NOT_ENABLED, + `The Firebase AI SDK requires the Firebase AI ` + `API ('firebasevertexai.googleapis.com') to be enabled in your ` + `Firebase project. Enable this API by visiting the Firebase Console ` + `at https://console.firebase.google.com/project/${url.apiSettings.project}/genai/ ` + @@ -207,8 +207,8 @@ export async function makeRequest( } ); } - throw new GenAIError( - GenAIErrorCode.FETCH_ERROR, + throw new AIError( + AIErrorCode.FETCH_ERROR, `Error fetching from ${url}: [${response.status} ${response.statusText}] ${message}`, { status: response.status, @@ -220,12 +220,12 @@ export async function makeRequest( } catch (e) { let err = e as Error; if ( - (e as GenAIError).code !== GenAIErrorCode.FETCH_ERROR && - (e as GenAIError).code !== GenAIErrorCode.API_NOT_ENABLED && + (e as AIError).code !== AIErrorCode.FETCH_ERROR && + (e as AIError).code !== AIErrorCode.API_NOT_ENABLED && e instanceof Error ) { - err = new GenAIError( - GenAIErrorCode.ERROR, + err = new AIError( + AIErrorCode.ERROR, `Error fetching from ${url.toString()}: ${e.message}` ); err.stack = e.stack; diff --git a/packages/vertexai/src/requests/response-helpers.ts b/packages/vertexai/src/requests/response-helpers.ts index db79abaf937..6a23ecb7f05 100644 --- a/packages/vertexai/src/requests/response-helpers.ts +++ b/packages/vertexai/src/requests/response-helpers.ts @@ -23,9 +23,9 @@ import { GenerateContentResponse, ImagenGCSImage, ImagenInlineImage, - GenAIErrorCode + AIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { AIError } from '../errors'; import { logger } from '../logger'; import { ImagenResponseInternal } from '../types/internal'; @@ -67,8 +67,8 @@ export function addHelpers( ); } if (hadBadFinishReason(response.candidates[0])) { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage( response )}. 
Response body stored in error.response`, @@ -79,8 +79,8 @@ export function addHelpers( } return getText(response); } else if (response.promptFeedback) { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, `Text not available. ${formatBlockErrorMessage(response)}`, { response @@ -99,8 +99,8 @@ export function addHelpers( ); } if (hadBadFinishReason(response.candidates[0])) { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, `Response error: ${formatBlockErrorMessage( response )}. Response body stored in error.response`, @@ -111,8 +111,8 @@ export function addHelpers( } return getFunctionCalls(response); } else if (response.promptFeedback) { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, `Function call not available. ${formatBlockErrorMessage(response)}`, { response @@ -217,8 +217,8 @@ export async function handlePredictResponse< // The backend should always send a non-empty array of predictions if the response was successful. if (!responseJson.predictions || responseJson.predictions?.length === 0) { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, 'No predictions or filtered reason received from Vertex AI. Please report this issue with the full error details at https://github.com/firebase/firebase-js-sdk/issues.' ); } @@ -237,8 +237,8 @@ export async function handlePredictResponse< gcsURI: prediction.gcsUri } as T); } else { - throw new GenAIError( - GenAIErrorCode.RESPONSE_ERROR, + throw new AIError( + AIErrorCode.RESPONSE_ERROR, `Predictions array in response has missing properties. Response: ${JSON.stringify( responseJson )}` diff --git a/packages/vertexai/src/requests/schema-builder.test.ts b/packages/vertexai/src/requests/schema-builder.test.ts index 8cef35ade8d..d05b81381ea 100644 --- a/packages/vertexai/src/requests/schema-builder.test.ts +++ b/packages/vertexai/src/requests/schema-builder.test.ts @@ -18,7 +18,7 @@ import { expect, use } from 'chai'; import sinonChai from 'sinon-chai'; import { Schema } from './schema-builder'; -import { GenAIErrorCode } from '../types'; +import { AIErrorCode } from '../types'; use(sinonChai); @@ -243,7 +243,7 @@ describe('Schema builder', () => { }, optionalProperties: ['cat'] }); - expect(() => schema.toJSON()).to.throw(GenAIErrorCode.INVALID_SCHEMA); + expect(() => schema.toJSON()).to.throw(AIErrorCode.INVALID_SCHEMA); }); }); diff --git a/packages/vertexai/src/requests/schema-builder.ts b/packages/vertexai/src/requests/schema-builder.ts index a729b0a6ebf..524cfdb1c20 100644 --- a/packages/vertexai/src/requests/schema-builder.ts +++ b/packages/vertexai/src/requests/schema-builder.ts @@ -15,8 +15,8 @@ * limitations under the License. 
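To make the `optionalProperties` check in the schema builder concrete, a small sketch assuming the `Schema` helpers exercised by the test above (`Schema.object`, `Schema.string`); the property names and import path are illustrative.

```ts
import { Schema } from './requests/schema-builder'; // in-package path, for illustration

// Every key in `optionalProperties` must also exist in `properties`.
const ok = Schema.object({
  properties: {
    name: Schema.string(),
    nickname: Schema.string()
  },
  optionalProperties: ['nickname']
});
ok.toJSON(); // serializes normally

const broken = Schema.object({
  properties: { name: Schema.string() },
  optionalProperties: ['cat'] // not declared in `properties`
});
// broken.toJSON() would throw an AIError with code AIErrorCode.INVALID_SCHEMA:
// 'Property "cat" specified in "optionalProperties" does not exist.'
```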
*/ -import { GenAIError } from '../errors'; -import { GenAIErrorCode } from '../types'; +import { AIError } from '../errors'; +import { AIErrorCode } from '../types'; import { SchemaInterface, SchemaType, @@ -266,8 +266,8 @@ export class ObjectSchema extends Schema { if (this.optionalProperties) { for (const propertyKey of this.optionalProperties) { if (!this.properties.hasOwnProperty(propertyKey)) { - throw new GenAIError( - GenAIErrorCode.INVALID_SCHEMA, + throw new AIError( + AIErrorCode.INVALID_SCHEMA, `Property "${propertyKey}" specified in "optionalProperties" does not exist.` ); } diff --git a/packages/vertexai/src/requests/stream-reader.test.ts b/packages/vertexai/src/requests/stream-reader.test.ts index 2e7041f064a..710372cef40 100644 --- a/packages/vertexai/src/requests/stream-reader.test.ts +++ b/packages/vertexai/src/requests/stream-reader.test.ts @@ -34,9 +34,9 @@ import { HarmCategory, HarmProbability, SafetyRating, - GenAIErrorCode + AIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { AIError } from '../errors'; import { ApiSettings } from '../types/internal'; import { vertexAIBackend } from '../api'; @@ -482,8 +482,8 @@ describe('aggregateResponses', () => { try { aggregateResponses(responsesToAggregate); } catch (e) { - expect((e as GenAIError).code).includes(GenAIErrorCode.INVALID_CONTENT); - expect((e as GenAIError).message).to.include( + expect((e as AIError).code).includes(AIErrorCode.INVALID_CONTENT); + expect((e as AIError).message).to.include( 'Part should have at least one property, but there are none. This is likely caused ' + 'by a malformed response from the backend.' ); diff --git a/packages/vertexai/src/requests/stream-reader.ts b/packages/vertexai/src/requests/stream-reader.ts index db1f99d7b27..23cc52299f3 100644 --- a/packages/vertexai/src/requests/stream-reader.ts +++ b/packages/vertexai/src/requests/stream-reader.ts @@ -21,9 +21,9 @@ import { GenerateContentResponse, GenerateContentStreamResult, Part, - GenAIErrorCode + AIErrorCode } from '../types'; -import { GenAIError } from '../errors'; +import { AIError } from '../errors'; import { createEnhancedContentResponse } from './response-helpers'; import * as GoogleAIMapper from '../googleAIMappers'; import { GoogleAIGenerateContentResponse } from '../types/googleAI'; @@ -122,8 +122,8 @@ export function getResponseStream( if (done) { if (currentText.trim()) { controller.error( - new GenAIError( - GenAIErrorCode.PARSE_FAILED, + new AIError( + AIErrorCode.PARSE_FAILED, 'Failed to parse stream' ) ); @@ -141,8 +141,8 @@ export function getResponseStream( parsedResponse = JSON.parse(match[1]); } catch (e) { controller.error( - new GenAIError( - GenAIErrorCode.PARSE_FAILED, + new AIError( + AIErrorCode.PARSE_FAILED, `Error parsing JSON response: "${match[1]}` ) ); @@ -220,8 +220,8 @@ export function aggregateResponses( newPart.functionCall = part.functionCall; } if (Object.keys(newPart).length === 0) { - throw new GenAIError( - GenAIErrorCode.INVALID_CONTENT, + throw new AIError( + AIErrorCode.INVALID_CONTENT, 'Part should have at least one property, but there are none. This is likely caused ' + 'by a malformed response from the backend.' 
); diff --git a/packages/vertexai/src/service.test.ts b/packages/vertexai/src/service.test.ts index e76037971e5..11c78fbf15e 100644 --- a/packages/vertexai/src/service.test.ts +++ b/packages/vertexai/src/service.test.ts @@ -16,7 +16,7 @@ */ import { vertexAIBackend } from './api'; import { DEFAULT_LOCATION } from './constants'; -import { GenAIService } from './service'; +import { AIService } from './service'; import { expect } from 'chai'; const fakeApp = { @@ -28,19 +28,19 @@ const fakeApp = { } }; -describe('GenAIService', () => { +describe('AIService', () => { // TODO (dlarocque): move some of these tests to helpers.test.ts it('uses default location if not specified', () => { - const genAI = new GenAIService(fakeApp, vertexAIBackend()); - expect(genAI.location).to.equal(DEFAULT_LOCATION); + const ai = new AIService(fakeApp, vertexAIBackend()); + expect(ai.location).to.equal(DEFAULT_LOCATION); }); it('uses custom location if specified', () => { - const genAI = new GenAIService( + const ai = new AIService( fakeApp, vertexAIBackend('somewhere'), /* authProvider */ undefined, /* appCheckProvider */ undefined ); - expect(genAI.location).to.equal('somewhere'); + expect(ai.location).to.equal('somewhere'); }); }); diff --git a/packages/vertexai/src/service.ts b/packages/vertexai/src/service.ts index d793ef5acfc..a608de384b5 100644 --- a/packages/vertexai/src/service.ts +++ b/packages/vertexai/src/service.ts @@ -16,7 +16,7 @@ */ import { FirebaseApp, _FirebaseService } from '@firebase/app'; -import { Backend, GenAI } from './public-types'; +import { Backend, AI } from './public-types'; import { AppCheckInternalComponentName, FirebaseAppCheckInternal @@ -27,7 +27,7 @@ import { FirebaseAuthInternalName } from '@firebase/auth-interop-types'; -export class GenAIService implements GenAI, _FirebaseService { +export class AIService implements AI, _FirebaseService { auth: FirebaseAuthInternal | null; appCheck: FirebaseAppCheckInternal | null; location: string; // This is here for backwards-compatibility diff --git a/packages/vertexai/src/types/error.ts b/packages/vertexai/src/types/error.ts index c29bbe95284..ef3ad7fc30c 100644 --- a/packages/vertexai/src/types/error.ts +++ b/packages/vertexai/src/types/error.ts @@ -58,11 +58,11 @@ export interface CustomErrorData { } /** - * Standardized error codes that {@link GenAIError} can have. + * Standardized error codes that {@link AIError} can have. * * @public */ -export const enum GenAIErrorCode { +export const enum AIErrorCode { /** A generic error occurred. */ ERROR = 'error', diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts index f18a0ae0c48..33ed804bb9f 100644 --- a/packages/vertexai/src/types/requests.ts +++ b/packages/vertexai/src/types/requests.ts @@ -65,7 +65,7 @@ export interface SafetySetting { threshold: HarmBlockThreshold; /** * This property is not supported in Google AI. - * If this is a property on a {@link GenerateContentRequest} to be sent, a {@link GenAIError} + * If this is a property on a {@link GenerateContentRequest} to be sent, an {@link AIError} * will be thrown. 
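Since these hunks rename the public error surface, here is a rough sketch of how calling code might inspect the renamed `AIError` and `AIErrorCode`. The `.code` and `.customErrorData` fields are the ones exercised by the tests in this patch; the helper name and import paths are illustrative.

```ts
import { AIError } from './errors'; // in-package paths, for illustration
import { AIErrorCode } from './types';
import { GenerativeModel } from './models/generative-model';

async function generateOrExplain(
  model: GenerativeModel,
  prompt: string
): Promise<string | undefined> {
  try {
    const result = await model.generateContent(prompt);
    return result.response.text();
  } catch (e) {
    if (e instanceof AIError) {
      if (e.code === AIErrorCode.FETCH_ERROR) {
        // customErrorData carries the HTTP status captured by makeRequest().
        console.error(`Fetch failed: ${e.customErrorData?.status} ${e.customErrorData?.statusText}`);
      } else if (e.code === AIErrorCode.UNSUPPORTED) {
        // Thrown when a request uses a feature the Google AI backend rejects,
        // such as SafetySetting.method.
        console.error('This request is not supported by the configured backend.');
      } else {
        console.error(e.message);
      }
      return undefined;
    }
    throw e;
  }
}
```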
*/ method?: HarmBlockMethod; From 5a78b76f9ee9bc45fd9d74f46416d144b3effc2e Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 22 Apr 2025 13:41:15 -0400 Subject: [PATCH 03/16] Convert backend types to classes --- common/api-review/vertexai.api.md | 21 +----- config/.eslintrc.js | 3 +- packages/vertexai/src/api.test.ts | 38 ++-------- packages/vertexai/src/api.ts | 62 ++++++---------- packages/vertexai/src/backend.test.ts | 35 +++++++++ packages/vertexai/src/backend.ts | 72 +++++++++++++++++++ .../src/backwards-compatbility.test.ts | 6 +- packages/vertexai/src/errors.ts | 2 +- .../vertexai/src/methods/chat-session.test.ts | 4 +- .../vertexai/src/methods/count-tokens.test.ts | 8 +-- .../src/methods/generate-content.test.ts | 7 +- .../vertexai/src/models/genai-model.test.ts | 10 +-- packages/vertexai/src/models/genai-model.ts | 3 +- .../src/models/generative-model.test.ts | 4 +- .../vertexai/src/models/imagen-model.test.ts | 4 +- packages/vertexai/src/public-types.ts | 50 +------------ .../vertexai/src/requests/request-helpers.ts | 7 +- .../vertexai/src/requests/request.test.ts | 20 +++--- packages/vertexai/src/requests/request.ts | 11 ++- .../src/requests/stream-reader.test.ts | 4 +- .../vertexai/src/requests/stream-reader.ts | 5 +- packages/vertexai/src/service.test.ts | 6 +- packages/vertexai/src/service.ts | 5 +- packages/vertexai/src/types/internal.ts | 8 ++- scripts/update_vertexai_responses.sh | 2 +- 25 files changed, 194 insertions(+), 203 deletions(-) create mode 100644 packages/vertexai/src/backend.test.ts create mode 100644 packages/vertexai/src/backend.ts diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index cc1a0c3269c..3f6fa31992f 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -12,6 +12,7 @@ import { FirebaseError } from '@firebase/util'; // @public export interface AI { app: FirebaseApp; + // Warning: (ae-forgotten-export) The symbol "Backend" needs to be exported by the entry point index.d.ts backend: Backend; // @deprecated location: string; @@ -74,9 +75,6 @@ export class ArraySchema extends Schema { toJSON(): SchemaRequest; } -// @public -export type Backend = GoogleAIBackend | VertexAIBackend; - // @public export const BackendType: { readonly VERTEX_AI: "VERTEX_AI"; @@ -422,14 +420,6 @@ export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOp // @public export function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI; -// @public -export type GoogleAIBackend = { - backendType: typeof BackendType.GOOGLE_AI; -}; - -// @public -export function googleAIBackend(): GoogleAIBackend; - // @public @deprecated (undocumented) export interface GroundingAttribution { // (undocumented) @@ -859,15 +849,6 @@ export interface UsageMetadata { // @public export type VertexAI = AI; -// @public -export type VertexAIBackend = { - backendType: typeof BackendType.VERTEX_AI; - location: string; -}; - -// @public -export function vertexAIBackend(location?: string): VertexAIBackend; - // @public export const VertexAIError: typeof AIError; diff --git a/config/.eslintrc.js b/config/.eslintrc.js index 57243a3e2a4..aee4a839aaf 100644 --- a/config/.eslintrc.js +++ b/config/.eslintrc.js @@ -174,7 +174,8 @@ module.exports = { } } ], - '@typescript-eslint/consistent-type-definitions': ['error', 'interface'], + // We prefer using interfaces, but we need to use types for aliases like ' + // '@typescript-eslint/consistent-type-definitions': ['error', 'interface'], 
'@typescript-eslint/explicit-member-accessibility': [ 'error', { diff --git a/packages/vertexai/src/api.test.ts b/packages/vertexai/src/api.test.ts index bf85c557b1e..0554ff46441 100644 --- a/packages/vertexai/src/api.test.ts +++ b/packages/vertexai/src/api.test.ts @@ -16,17 +16,11 @@ */ import { ImagenModelParams, ModelParams, AIErrorCode } from './types'; import { AIError } from './errors'; -import { - ImagenModel, - getGenerativeModel, - getImagenModel, - googleAIBackend, - vertexAIBackend -} from './api'; +import { ImagenModel, getGenerativeModel, getImagenModel } from './api'; import { expect } from 'chai'; -import { BackendType, AI } from './public-types'; +import { AI } from './public-types'; import { GenerativeModel } from './models/generative-model'; -import { DEFAULT_LOCATION } from './constants'; +import { VertexAIBackend } from './backend'; const fakeAI: AI = { app: { @@ -38,7 +32,7 @@ const fakeAI: AI = { appId: 'my-appid' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; @@ -171,28 +165,4 @@ describe('Top level API', () => { expect(genModel).to.be.an.instanceOf(ImagenModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); - it('googleAIBackend returns a backend with backendType GOOGLE_AI', () => { - const backend = googleAIBackend(); - expect(backend.backendType).to.equal(BackendType.GOOGLE_AI); - }); - it('vertexAIBackend returns a backend with backendType VERTEX_AI', () => { - const backend = vertexAIBackend(); - expect(backend.backendType).to.equal(BackendType.VERTEX_AI); - expect(backend.location).to.equal(DEFAULT_LOCATION); - }); - it('vertexAIBackend sets custom location', () => { - const backend = vertexAIBackend('test-location'); - expect(backend.backendType).to.equal(BackendType.VERTEX_AI); - expect(backend.location).to.equal('test-location'); - }); - it('vertexAIBackend sets custom location even if empty string', () => { - const backend = vertexAIBackend(''); - expect(backend.backendType).to.equal(BackendType.VERTEX_AI); - expect(backend.location).to.equal(''); - }); - it('vertexAIBackend uses default location if location is null', () => { - const backend = vertexAIBackend(null as any); - expect(backend.backendType).to.equal(BackendType.VERTEX_AI); - expect(backend.location).to.equal(DEFAULT_LOCATION); - }); }); diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index d5e641a582c..b1eff5c7378 100644 --- a/packages/vertexai/src/api.ts +++ b/packages/vertexai/src/api.ts @@ -24,9 +24,7 @@ import { BackendType, AI, AIOptions, - GoogleAIBackend, VertexAI, - VertexAIBackend, VertexAIOptions } from './public-types'; import { @@ -38,6 +36,7 @@ import { import { AIError } from './errors'; import { AIModel, GenerativeModel, ImagenModel } from './models'; import { encodeInstanceIdentifier } from './helpers'; +import { GoogleAIBackend, VertexAIBackend } from './backend'; export { ChatSession } from './methods/chat-session'; export * from './requests/schema-builder'; @@ -72,7 +71,7 @@ declare module '@firebase/component' { /** * It is recommended to use the new {@link getAI | getAI()}. - * + * * Returns a {@link VertexAI} instance for the given app. * * @public @@ -109,13 +108,13 @@ export function getVertexAI( * @example * ```javascript * // Get an AI instance configured to use Google AI. 
- * const ai = getAI(app, { backend: googleAIBackend() }); + * const ai = getAI(app, { backend: new GoogleAIBackend() }); * ``` * * @example * ```javascript * // Get an AI instance configured to use Vertex AI. - * const ai = getAI(app, { backend: vertexAIBackend() }); + * const ai = getAI(app, { backend: new VertexAIBackend() }); * ``` * * @param app - The {@link @firebase/app#FirebaseApp} to use. @@ -126,52 +125,33 @@ export function getVertexAI( */ export function getAI( app: FirebaseApp = getApp(), - options: AIOptions = { backend: googleAIBackend() } + options: AIOptions = { backend: new GoogleAIBackend() } ): AI { app = getModularInstance(app); // Dependencies const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE); - const identifier = encodeInstanceIdentifier(options.backend); + let identifier: string; + if (options.backend instanceof GoogleAIBackend) { + identifier = encodeInstanceIdentifier({ + backendType: BackendType.GOOGLE_AI + }); + } else if (options.backend instanceof VertexAIBackend) { + identifier = encodeInstanceIdentifier({ + backendType: BackendType.VERTEX_AI, + location: options.backend.location ?? DEFAULT_LOCATION + }); + } else { + throw new AIError( + AIErrorCode.ERROR, + `Invalid backend type: ${options.backend.backendType}` + ); + } return AIProvider.getImmediate({ identifier }); } -/** - * Creates a {@link Backend} instance configured to use Google AI. - * - * @returns A {@link GoogleAIBackend} object. - * - * @public - */ -export function googleAIBackend(): GoogleAIBackend { - const backend: GoogleAIBackend = { - backendType: BackendType.GOOGLE_AI - }; - - return backend; -} - -/** - * Creates a {@link Backend} instance configured to use Vertex AI. - * - * @param location - The region identifier, defaulting to `us-central1`; - * see {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} - * for a list of supported locations. - * @returns A {@link VertexAIBackend} object. - * - * @public - */ -export function vertexAIBackend(location?: string): VertexAIBackend { - const backend: VertexAIBackend = { - backendType: BackendType.VERTEX_AI, - location: location ?? DEFAULT_LOCATION - }; - - return backend; -} - /** * Returns a {@link GenerativeModel} class with methods for inference * and other functionality. 
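After this commit, initialization uses the backend classes directly. A minimal sketch follows; the import paths mirror the in-repo ones used by the tests in this commit, and the app and model name are placeholders.

```ts
import { getApp } from '@firebase/app';
import { getAI, getGenerativeModel } from './api';
import { GoogleAIBackend, VertexAIBackend } from './backend';

const app = getApp();

// With no options, getAI() now defaults to `new GoogleAIBackend()`.
const googleAI = getAI(app, { backend: new GoogleAIBackend() });

// The Vertex AI backend still takes an optional region, defaulting to us-central1.
const vertexAI = getAI(app, { backend: new VertexAIBackend('us-central1') });

const model = getGenerativeModel(vertexAI, { model: 'my-model' });
```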
diff --git a/packages/vertexai/src/backend.test.ts b/packages/vertexai/src/backend.test.ts new file mode 100644 index 00000000000..837ba04519e --- /dev/null +++ b/packages/vertexai/src/backend.test.ts @@ -0,0 +1,35 @@ +import { expect } from "chai"; +import { GoogleAIBackend, VertexAIBackend } from "./backend"; +import { BackendType } from "./public-types"; +import { DEFAULT_LOCATION } from "./constants"; + +describe('Backend', () => { + describe('GoogleAIBackend', () => { + it('sets backendType to GOOGLE_AI', () => { + const backend = new GoogleAIBackend(); + expect(backend.backendType).to.equal(BackendType.GOOGLE_AI); + }); + }); + describe('VertexAIBackend', () => { + it('sets backendType to VERTEX_AI', () => { + const backend = new VertexAIBackend(); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(DEFAULT_LOCATION); + }); + it('sets custom location', () => { + const backend = new VertexAIBackend('test-location'); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal('test-location'); + }); + it('sets custom location even if empty string', () => { + const backend = new VertexAIBackend(''); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(''); + }); + it('uses default location if location is null', () => { + const backend = new VertexAIBackend(null as any); + expect(backend.backendType).to.equal(BackendType.VERTEX_AI); + expect(backend.location).to.equal(DEFAULT_LOCATION); + }); + }); +}); \ No newline at end of file diff --git a/packages/vertexai/src/backend.ts b/packages/vertexai/src/backend.ts new file mode 100644 index 00000000000..40f0884712a --- /dev/null +++ b/packages/vertexai/src/backend.ts @@ -0,0 +1,72 @@ +import { DEFAULT_LOCATION } from "./constants"; +import { BackendType } from "./public-types"; + +/** + * Abstract base class representing the configuration for an AI service backend. + * This class should not be instantiated directly. Use its subclasses + * {@link GoogleAIBackend} or {@link VertexAIBackend}. + * + * @public + */ +export abstract class Backend { + /** + * Specifies the backend type (either 'GOOGLE_AI' or 'VERTEX_AI'). + */ + readonly backendType: BackendType; + + /** + * Protected constructor for use by subclasses. + * @param type - The specific backend type constant (e.g., BackendType.GOOGLE_AI). + */ + protected constructor(type: BackendType) { + this.backendType = type; + } +} + +/** + * Represents the configuration class for the Google AI backend. + * Use this with {@link AIOptions} when initializing the service with + * {@link getAI | getAI()}. + * + * @public + */ +export class GoogleAIBackend extends Backend { + /** + * Creates a configuration object for the Google AI backend. + */ + constructor() { + super(BackendType.GOOGLE_AI); + } +} + +/** + * Represents the configuration class for the Vertex AI backend. + * Use this with {@link AIOptions} when initializing the service with + * {@link getAI | getAI() }. + * + * @public + */ +export class VertexAIBackend extends Backend { + /** + * The region identifier. + * See {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + readonly location: string; + + /** + * Creates a configuration object for the Vertex AI backend.
+ * + * @param location - The region identifier, defaulting to `us-central1`; + * see {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} + * for a list of supported locations. + */ + constructor(location: string = DEFAULT_LOCATION) { + super(BackendType.VERTEX_AI); + if (location === null) { + this.location = DEFAULT_LOCATION; + } else { + this.location = location; + } + } +} \ No newline at end of file diff --git a/packages/vertexai/src/backwards-compatbility.test.ts b/packages/vertexai/src/backwards-compatbility.test.ts index 7f96328b305..62463009b24 100644 --- a/packages/vertexai/src/backwards-compatbility.test.ts +++ b/packages/vertexai/src/backwards-compatbility.test.ts @@ -24,10 +24,10 @@ import { VertexAIErrorCode, VertexAIModel, getGenerativeModel, - getImagenModel, - vertexAIBackend + getImagenModel } from './api'; import { AI, VertexAI, AIErrorCode } from './public-types'; +import { VertexAIBackend } from './backend'; function assertAssignable(): void {} @@ -41,7 +41,7 @@ const fakeAI: AI = { appId: 'app-id' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; diff --git a/packages/vertexai/src/errors.ts b/packages/vertexai/src/errors.ts index 85180fe8381..1746b815042 100644 --- a/packages/vertexai/src/errors.ts +++ b/packages/vertexai/src/errors.ts @@ -16,7 +16,7 @@ */ import { FirebaseError } from '@firebase/util'; -import { AIErrorCode as AIErrorCode, CustomErrorData } from './types'; +import { AIErrorCode, CustomErrorData } from './types'; import { VERTEX_TYPE } from './constants'; /** diff --git a/packages/vertexai/src/methods/chat-session.test.ts b/packages/vertexai/src/methods/chat-session.test.ts index cbfcd22e3e0..0564aa84ed6 100644 --- a/packages/vertexai/src/methods/chat-session.test.ts +++ b/packages/vertexai/src/methods/chat-session.test.ts @@ -23,7 +23,7 @@ import * as generateContentMethods from './generate-content'; import { GenerateContentStreamResult } from '../types'; import { ChatSession } from './chat-session'; import { ApiSettings } from '../types/internal'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; use(sinonChai); use(chaiAsPromised); @@ -33,7 +33,7 @@ const fakeApiSettings: ApiSettings = { project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: vertexAIBackend() + backend: new VertexAIBackend() }; describe('ChatSession', () => { diff --git a/packages/vertexai/src/methods/count-tokens.test.ts b/packages/vertexai/src/methods/count-tokens.test.ts index bdf99711f55..7ebaaf64683 100644 --- a/packages/vertexai/src/methods/count-tokens.test.ts +++ b/packages/vertexai/src/methods/count-tokens.test.ts @@ -25,8 +25,8 @@ import { countTokens } from './count-tokens'; import { CountTokensRequest } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; -import { googleAIBackend, vertexAIBackend } from '../api'; import { mapCountTokensRequest } from '../googleAIMappers'; +import { GoogleAIBackend, VertexAIBackend } from '../backend'; use(sinonChai); use(chaiAsPromised); @@ -36,15 +36,15 @@ const fakeApiSettings: ApiSettings = { project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: vertexAIBackend() + backend: new VertexAIBackend() }; const fakeGoogleAIApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', - location: 'us-central1', - backend: googleAIBackend() + 
location: '', + backend: new GoogleAIBackend() }; const fakeRequestParams: CountTokensRequest = { diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index b5ef144f76f..f796ef11676 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -31,8 +31,9 @@ import { } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; -import { AIError, googleAIBackend, vertexAIBackend } from '../api'; +import { AIError } from '../api'; import { mapGenerateContentRequest } from '../googleAIMappers'; +import { GoogleAIBackend, VertexAIBackend } from '../backend'; use(sinonChai); use(chaiAsPromised); @@ -42,7 +43,7 @@ const fakeApiSettings: ApiSettings = { project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: vertexAIBackend() + backend: new VertexAIBackend() }; const fakeGoogleAIApiSettings: ApiSettings = { @@ -50,7 +51,7 @@ const fakeGoogleAIApiSettings: ApiSettings = { project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: googleAIBackend() + backend: new GoogleAIBackend() }; const fakeRequestParams: GenerateContentRequest = { diff --git a/packages/vertexai/src/models/genai-model.test.ts b/packages/vertexai/src/models/genai-model.test.ts index d8db0bea2ec..229220d7a2a 100644 --- a/packages/vertexai/src/models/genai-model.test.ts +++ b/packages/vertexai/src/models/genai-model.test.ts @@ -19,7 +19,7 @@ import { AI, AIErrorCode } from '../public-types'; import sinonChai from 'sinon-chai'; import { AIModel } from './genai-model'; import { AIError } from '../errors'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; use(sinonChai); @@ -43,7 +43,7 @@ const fakeAI: AI = { appId: 'my-appid' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; @@ -76,7 +76,7 @@ describe('AIModel', () => { projectId: 'my-project' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; try { @@ -94,7 +94,7 @@ describe('AIModel', () => { apiKey: 'key' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; try { @@ -113,7 +113,7 @@ describe('AIModel', () => { projectId: 'my-project' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; try { diff --git a/packages/vertexai/src/models/genai-model.ts b/packages/vertexai/src/models/genai-model.ts index dd3b666d555..e059ff58530 100644 --- a/packages/vertexai/src/models/genai-model.ts +++ b/packages/vertexai/src/models/genai-model.ts @@ -76,8 +76,7 @@ export abstract class AIModel { apiKey: ai.app.options.apiKey, project: ai.app.options.projectId, appId: ai.app.options.appId, - automaticDataCollectionEnabled: - ai.app.automaticDataCollectionEnabled, + automaticDataCollectionEnabled: ai.app.automaticDataCollectionEnabled, location: ai.location, backend: ai.backend }; diff --git a/packages/vertexai/src/models/generative-model.test.ts b/packages/vertexai/src/models/generative-model.test.ts index 7f61070a52a..3ce7173e03e 100644 --- a/packages/vertexai/src/models/generative-model.test.ts +++ b/packages/vertexai/src/models/generative-model.test.ts @@ -21,7 +21,7 @@ import * as request from '../requests/request'; import { match, restore, stub } from 
'sinon'; import { getMockResponse } from '../../test-utils/mock-response'; import sinonChai from 'sinon-chai'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; use(sinonChai); @@ -35,7 +35,7 @@ const fakeAI: AI = { appId: 'my-appid' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; diff --git a/packages/vertexai/src/models/imagen-model.test.ts b/packages/vertexai/src/models/imagen-model.test.ts index f7a945d212d..f4121e18f2d 100644 --- a/packages/vertexai/src/models/imagen-model.test.ts +++ b/packages/vertexai/src/models/imagen-model.test.ts @@ -28,7 +28,7 @@ import sinonChai from 'sinon-chai'; import { AIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; import { match, restore, stub } from 'sinon'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; use(sinonChai); @@ -42,7 +42,7 @@ const fakeAI: AI = { appId: 'my-appid' } }, - backend: vertexAIBackend('us-central1'), + backend: new VertexAIBackend('us-central1'), location: 'us-central1' }; diff --git a/packages/vertexai/src/public-types.ts b/packages/vertexai/src/public-types.ts index 3a9e62a5fa5..fb668a5b956 100644 --- a/packages/vertexai/src/public-types.ts +++ b/packages/vertexai/src/public-types.ts @@ -16,6 +16,7 @@ */ import { FirebaseApp } from '@firebase/app'; +import { Backend } from './backend'; export * from './types'; @@ -61,53 +62,6 @@ export interface AI { location: string; } -/** - * Union type representing the backend configuration for the AI service. - * This can be either a {@link GoogleAIBackend} or a - * {@link VertexAIBackend} configuration object. - * - * Create instances using {@link googleAIBackend | googleAIBackend() } or - * {@link vertexAIBackend | vertexAIBackend() }. - * - * @public - */ -export type Backend = GoogleAIBackend | VertexAIBackend; - -/** - * Represents the configuration object for the Google AI backend. - * Use this with {@link AIOptions} when initializing the service with - * {@link getAI | getAI()}. - * Create an instance using {@link googleAIBackend | googleAIBackend()}. - * - * @public - */ -export type GoogleAIBackend = { - /** - * Specifies the backend type as Google AI. - */ - backendType: typeof BackendType.GOOGLE_AI; -}; - -/** - * Represents the configuration object for the Vertex AI backend. - * Use this with {@link AIOptions} when initializing the server with - * {@link getAI | getAI() }. - * Create an instance using {@link vertexAIBackend | vertexAIBackend() } function. - * - * @public - */ -export type VertexAIBackend = { - /** - * Specifies the backend type as Vertex AI. - */ - backendType: typeof BackendType.VERTEX_AI; - /** - * The region identifier, defaulting to `us-central1`; see {@link https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations | Vertex AI locations} - * for a list of supported locations. - */ - location: string; -}; - /** * An enum-like object containing constants that represent the supported backends * for the Firebase AI SDK. @@ -149,8 +103,6 @@ export type BackendType = (typeof BackendType)[keyof typeof BackendType]; export interface AIOptions { /** * The backend configuration to use for the AI service instance. - * Use {@link googleAIBackend | googleAIBackend()} or - * {@link vertexAIBackend | vertexAIBackend() } to create this configuration. 
*/ backend: Backend; } diff --git a/packages/vertexai/src/requests/request-helpers.ts b/packages/vertexai/src/requests/request-helpers.ts index c4dd514f8a8..c4cc1a20acc 100644 --- a/packages/vertexai/src/requests/request-helpers.ts +++ b/packages/vertexai/src/requests/request-helpers.ts @@ -15,12 +15,7 @@ * limitations under the License. */ -import { - Content, - GenerateContentRequest, - Part, - AIErrorCode -} from '../types'; +import { Content, GenerateContentRequest, Part, AIErrorCode } from '../types'; import { AIError } from '../errors'; import { ImagenGenerationParams, PredictRequestBody } from '../types/internal'; diff --git a/packages/vertexai/src/requests/request.test.ts b/packages/vertexai/src/requests/request.test.ts index 2efe72bdc60..0d162906fdc 100644 --- a/packages/vertexai/src/requests/request.test.ts +++ b/packages/vertexai/src/requests/request.test.ts @@ -25,7 +25,7 @@ import { DEFAULT_API_VERSION } from '../constants'; import { AIErrorCode } from '../types'; import { AIError } from '../errors'; import { getMockResponse } from '../../test-utils/mock-response'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; use(sinonChai); use(chaiAsPromised); @@ -35,7 +35,7 @@ const fakeApiSettings: ApiSettings = { project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: vertexAIBackend() + backend: new VertexAIBackend() }; describe('request methods', () => { @@ -108,7 +108,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend(), + backend: new VertexAIBackend(), getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) }; @@ -135,7 +135,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend(), + backend: new VertexAIBackend(), automaticDataCollectionEnabled: true, getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) @@ -160,7 +160,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend(), + backend: new VertexAIBackend(), automaticDataCollectionEnabled: false, getAuthToken: () => Promise.resolve({ accessToken: 'authtoken' }), getAppCheckToken: () => Promise.resolve({ token: 'appchecktoken' }) @@ -188,7 +188,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend() + backend: new VertexAIBackend() }, true, {} @@ -222,7 +222,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend(), + backend: new VertexAIBackend(), getAppCheckToken: () => Promise.resolve({ token: 'dummytoken', error: Error('oops') }) }, @@ -250,7 +250,7 @@ describe('request methods', () => { project: 'myproject', appId: 'my-appid', location: 'moon', - backend: vertexAIBackend() + backend: new VertexAIBackend() }, true, {} @@ -405,9 +405,7 @@ describe('request methods', () => { ); expect((e as AIError).message).to.include('500 Server Error'); expect((e as AIError).message).to.include('extra info'); - expect((e as AIError).message).to.include( - 'generic::invalid_argument' - ); + expect((e as AIError).message).to.include('generic::invalid_argument'); } expect(fetchStub).to.be.calledOnce; }); diff --git a/packages/vertexai/src/requests/request.ts 
b/packages/vertexai/src/requests/request.ts index 7b75d46a899..7ff51d71e3d 100644 --- a/packages/vertexai/src/requests/request.ts +++ b/packages/vertexai/src/requests/request.ts @@ -26,7 +26,7 @@ import { PACKAGE_VERSION } from '../constants'; import { logger } from '../logger'; -import { BackendType } from '../public-types'; +import { GoogleAIBackend, VertexAIBackend } from '../backend'; export enum Task { GENERATE_CONTENT = 'generateContent', @@ -59,10 +59,15 @@ export class RequestUrl { } private get modelPath(): string { - if (this.apiSettings.backend.backendType === BackendType.GOOGLE_AI) { + if (this.apiSettings.backend instanceof GoogleAIBackend) { return `projects/${this.apiSettings.project}/${this.model}`; - } else { + } else if (this.apiSettings.backend instanceof VertexAIBackend) { return `projects/${this.apiSettings.project}/locations/${this.apiSettings.backend.location}/${this.model}`; + } else { + throw new AIError( + AIErrorCode.ERROR, + `Invalid backend: ${this.apiSettings.backend}` + ); } } diff --git a/packages/vertexai/src/requests/stream-reader.test.ts b/packages/vertexai/src/requests/stream-reader.test.ts index 710372cef40..ea832c7816f 100644 --- a/packages/vertexai/src/requests/stream-reader.test.ts +++ b/packages/vertexai/src/requests/stream-reader.test.ts @@ -38,14 +38,14 @@ import { } from '../types'; import { AIError } from '../errors'; import { ApiSettings } from '../types/internal'; -import { vertexAIBackend } from '../api'; +import { VertexAIBackend } from '../backend'; const fakeApiSettings: ApiSettings = { apiKey: 'key', project: 'my-project', appId: 'my-appid', location: 'us-central1', - backend: vertexAIBackend() + backend: new VertexAIBackend() }; use(sinonChai); diff --git a/packages/vertexai/src/requests/stream-reader.ts b/packages/vertexai/src/requests/stream-reader.ts index 23cc52299f3..1b762947393 100644 --- a/packages/vertexai/src/requests/stream-reader.ts +++ b/packages/vertexai/src/requests/stream-reader.ts @@ -122,10 +122,7 @@ export function getResponseStream( if (done) { if (currentText.trim()) { controller.error( - new AIError( - AIErrorCode.PARSE_FAILED, - 'Failed to parse stream' - ) + new AIError(AIErrorCode.PARSE_FAILED, 'Failed to parse stream') ); return; } diff --git a/packages/vertexai/src/service.test.ts b/packages/vertexai/src/service.test.ts index 11c78fbf15e..ba4c736e810 100644 --- a/packages/vertexai/src/service.test.ts +++ b/packages/vertexai/src/service.test.ts @@ -14,7 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -import { vertexAIBackend } from './api'; +import { VertexAIBackend } from './backend'; import { DEFAULT_LOCATION } from './constants'; import { AIService } from './service'; import { expect } from 'chai'; @@ -31,13 +31,13 @@ const fakeApp = { describe('AIService', () => { // TODO (dlarocque): move some of these tests to helpers.test.ts it('uses default location if not specified', () => { - const ai = new AIService(fakeApp, vertexAIBackend()); + const ai = new AIService(fakeApp, new VertexAIBackend()); expect(ai.location).to.equal(DEFAULT_LOCATION); }); it('uses custom location if specified', () => { const ai = new AIService( fakeApp, - vertexAIBackend('somewhere'), + new VertexAIBackend('somewhere'), /* authProvider */ undefined, /* appCheckProvider */ undefined ); diff --git a/packages/vertexai/src/service.ts b/packages/vertexai/src/service.ts index a608de384b5..006cc45a94e 100644 --- a/packages/vertexai/src/service.ts +++ b/packages/vertexai/src/service.ts @@ -16,7 +16,7 @@ */ import { FirebaseApp, _FirebaseService } from '@firebase/app'; -import { Backend, AI } from './public-types'; +import { AI } from './public-types'; import { AppCheckInternalComponentName, FirebaseAppCheckInternal @@ -26,6 +26,7 @@ import { FirebaseAuthInternal, FirebaseAuthInternalName } from '@firebase/auth-interop-types'; +import { Backend, VertexAIBackend } from './backend'; export class AIService implements AI, _FirebaseService { auth: FirebaseAuthInternal | null; @@ -43,7 +44,7 @@ export class AIService implements AI, _FirebaseService { this.auth = auth || null; this.appCheck = appCheck || null; - if (backend.backendType === 'VERTEX_AI') { + if (backend instanceof VertexAIBackend) { this.location = backend.location; } else { this.location = ''; diff --git a/packages/vertexai/src/types/internal.ts b/packages/vertexai/src/types/internal.ts index 4303d4c07d8..4dde95d520e 100644 --- a/packages/vertexai/src/types/internal.ts +++ b/packages/vertexai/src/types/internal.ts @@ -17,7 +17,8 @@ import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; -import { Backend } from '../public-types'; +import { Backend } from '../backend'; +import { BackendType } from '../public-types'; export * from './imagen/internal'; @@ -35,4 +36,7 @@ export interface ApiSettings { getAppCheckToken?: () => Promise; } -export type InstanceIdentifier = Backend; +export interface InstanceIdentifier { + backendType: BackendType; + location?: string; +} diff --git a/scripts/update_vertexai_responses.sh b/scripts/update_vertexai_responses.sh index de55ac176ce..bf55a645a66 100755 --- a/scripts/update_vertexai_responses.sh +++ b/scripts/update_vertexai_responses.sh @@ -17,7 +17,7 @@ # This script replaces mock response files for Vertex AI unit tests with a fresh # clone of the shared repository of Vertex AI test data. 
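The `AIService` constructor above now branches on the backend class instead of comparing `backendType` strings. A small sketch of the resulting behavior, where `resolveLocation` is a hypothetical stand-in for that constructor logic:

import { Backend, GoogleAIBackend, VertexAIBackend } from '@firebase/vertexai';

// Hypothetical helper mirroring the constructor branch shown above.
function resolveLocation(backend: Backend): string {
  return backend instanceof VertexAIBackend ? backend.location : '';
}

console.log(resolveLocation(new VertexAIBackend('europe-west1'))); // 'europe-west1'
console.log(resolveLocation(new VertexAIBackend()));               // 'us-central1' (the default)
console.log(resolveLocation(new GoogleAIBackend()));               // '' (location is unused for Google AI)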
-RESPONSES_VERSION='v8.*' # The major version of mock responses to use +RESPONSES_VERSION='v10.*' # The major version of mock responses to use REPO_NAME="vertexai-sdk-test-data" REPO_LINK="https://github.com/FirebaseExtended/$REPO_NAME.git" From 02600d0604dd5e1e85075a1b3ace6e23e6b9e23d Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 22 Apr 2025 13:53:37 -0400 Subject: [PATCH 04/16] Cleanup --- packages/vertexai/src/googleAIMappers.test.ts | 15 ++++++--------- packages/vertexai/src/googleAIMappers.ts | 7 ++----- packages/vertexai/src/helpers.test.ts | 1 - packages/vertexai/test-utils/mock-response.ts | 3 --- 4 files changed, 8 insertions(+), 18 deletions(-) diff --git a/packages/vertexai/src/googleAIMappers.test.ts b/packages/vertexai/src/googleAIMappers.test.ts index 6cf7425a293..a30fcdc3e7b 100644 --- a/packages/vertexai/src/googleAIMappers.test.ts +++ b/packages/vertexai/src/googleAIMappers.test.ts @@ -92,7 +92,7 @@ describe('Google AI Mappers', () => { }; const mappedRequest = mapGenerateContentRequest(request); expect(loggerWarnStub).to.have.been.calledOnceWith( - 'topK in GenerationConfig has been rounded to the nearest integer.' + 'topK in GenerationConfig has been rounded to the nearest integer to match the format for Google AI requests.' ); expect(mappedRequest.generationConfig?.topK).to.equal(16); }); @@ -133,12 +133,8 @@ describe('Google AI Mappers', () => { const googleAIMockResponse: GoogleAIGenerateContentResponse = await ( getMockResponse('googleAI', 'unary-success-citations.txt') as Response ).json(); - console.log(JSON.stringify(googleAIMockResponse)); const mappedResponse = mapGenerateContentResponse(googleAIMockResponse); - console.log(JSON.stringify(googleAIMockResponse)); - console.log(JSON.stringify(mappedResponse)); - expect(mappedResponse.candidates).to.exist; expect(mappedResponse.candidates?.[0].content.parts[0].text).to.contain( 'quantum mechanics' @@ -242,16 +238,17 @@ describe('Google AI Mappers', () => { it('should map a minimal Vertex AI CountTokensRequest', () => { const vertexRequest: CountTokensRequest = { - contents: fakeContents + contents: fakeContents, + systemInstruction: { role: 'system', parts: [{ text: 'Be nice' }] }, + generationConfig: { temperature: 0.8 } }; const expectedGoogleAIRequest: GoogleAICountTokensRequest = { generateContentRequest: { model: fakeModel, contents: vertexRequest.contents, - systemInstruction: undefined, - tools: undefined, - generationConfig: undefined + systemInstruction: { role: 'system', parts: [{ text: 'Be nice' }] }, + generationConfig: { temperature: 0.8 } } }; diff --git a/packages/vertexai/src/googleAIMappers.ts b/packages/vertexai/src/googleAIMappers.ts index af278bef782..290659f1f8d 100644 --- a/packages/vertexai/src/googleAIMappers.ts +++ b/packages/vertexai/src/googleAIMappers.ts @@ -76,7 +76,7 @@ export function mapGenerateContentRequest( if (roundedTopK !== generateContentRequest.generationConfig.topK) { logger.warn( - 'topK in GenerationConfig has been rounded to the nearest integer.' + 'topK in GenerationConfig has been rounded to the nearest integer to match the format for Google AI requests.' 
); generateContentRequest.generationConfig.topK = roundedTopK; } @@ -126,10 +126,7 @@ export function mapCountTokensRequest( const mappedCountTokensRequest: GoogleAICountTokensRequest = { generateContentRequest: { model, - contents: countTokensRequest.contents, - systemInstruction: countTokensRequest.systemInstruction, - tools: countTokensRequest.tools, - generationConfig: countTokensRequest.generationConfig + ...countTokensRequest } }; diff --git a/packages/vertexai/src/helpers.test.ts b/packages/vertexai/src/helpers.test.ts index 7ed8f6a754a..5fcf954ac37 100644 --- a/packages/vertexai/src/helpers.test.ts +++ b/packages/vertexai/src/helpers.test.ts @@ -29,7 +29,6 @@ describe('Identifier Encoding/Decoding', () => { backendType: BackendType.VERTEX_AI, location: 'us-central1' }; - console.log(identifier); const expected = `${AI_TYPE}/vertexai/us-central1`; expect(encodeInstanceIdentifier(identifier)).to.equal(expected); }); diff --git a/packages/vertexai/test-utils/mock-response.ts b/packages/vertexai/test-utils/mock-response.ts index c108704d615..5128ddabe74 100644 --- a/packages/vertexai/test-utils/mock-response.ts +++ b/packages/vertexai/test-utils/mock-response.ts @@ -72,9 +72,6 @@ export function getMockResponse( filename: string ): Partial { const mocksLookup = mockSetMaps[backendName]; - if (backendName === 'googleAI') { - console.log(Object.keys(mocksLookup)); - } if (!(filename in mocksLookup)) { throw Error(`${backendName} mock response file '${filename}' not found.`); } From ff625005d13dd91342f3e43f8ecef5c003807799 Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 22 Apr 2025 14:42:10 -0400 Subject: [PATCH 05/16] Move GoogleAI types to single file --- common/api-review/vertexai.api.md | 60 +++++++++++++++++-- ...ppers.test.ts => googleai-mappers.test.ts} | 14 ++--- ...googleAIMappers.ts => googleai-mappers.ts} | 24 +++----- .../vertexai/src/methods/count-tokens.test.ts | 2 +- packages/vertexai/src/methods/count-tokens.ts | 2 +- .../src/methods/generate-content.test.ts | 2 +- .../vertexai/src/methods/generate-content.ts | 6 +- packages/vertexai/src/public-types.ts | 2 +- .../vertexai/src/requests/stream-reader.ts | 4 +- packages/vertexai/src/types/googleAI/index.ts | 19 ------ .../vertexai/src/types/googleAI/requests.ts | 29 --------- .../vertexai/src/types/googleAI/responses.ts | 46 -------------- packages/vertexai/src/types/googleai.ts | 44 ++++++++++++++ packages/vertexai/src/types/index.ts | 1 + packages/vertexai/src/types/responses.ts | 31 ++++++++-- 15 files changed, 147 insertions(+), 139 deletions(-) rename packages/vertexai/src/{googleAIMappers.test.ts => googleai-mappers.test.ts} (94%) rename packages/vertexai/src/{googleAIMappers.ts => googleai-mappers.ts} (90%) delete mode 100644 packages/vertexai/src/types/googleAI/index.ts delete mode 100644 packages/vertexai/src/types/googleAI/requests.ts delete mode 100644 packages/vertexai/src/types/googleAI/responses.ts create mode 100644 packages/vertexai/src/types/googleai.ts diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index 3f6fa31992f..c4923bc52f3 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -125,11 +125,9 @@ export interface Citation { endIndex?: number; // (undocumented) license?: string; - // (undocumented) publicationDate?: Date_2; // (undocumented) startIndex?: number; - // (undocumented) title?: string; // (undocumented) uri?: string; @@ -420,6 +418,60 @@ export function getImagenModel(ai: AI, modelParams: ImagenModelParams, 
requestOp // @public export function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI; +// Warning: (ae-internal-missing-underscore) The name "GoogleAICitationMetadata" should be prefixed with an underscore because the declaration is marked as @internal +// +// @internal (undocumented) +export interface GoogleAICitationMetadata { + // (undocumented) + citationSources: Citation[]; +} + +// Warning: (ae-internal-missing-underscore) The name "GoogleAICountTokensRequest" should be prefixed with an underscore because the declaration is marked as @internal +// +// @internal (undocumented) +export interface GoogleAICountTokensRequest { + // (undocumented) + generateContentRequest: { + model: string; + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} + +// Warning: (ae-internal-missing-underscore) The name "GoogleAIGenerateContentCandidate" should be prefixed with an underscore because the declaration is marked as @internal +// +// @internal (undocumented) +export interface GoogleAIGenerateContentCandidate { + // (undocumented) + citationMetadata?: GoogleAICitationMetadata; + // (undocumented) + content: Content; + // (undocumented) + finishMessage?: string; + // (undocumented) + finishReason?: FinishReason; + // (undocumented) + groundingMetadata?: GroundingMetadata; + // (undocumented) + index: number; + // (undocumented) + safetyRatings?: SafetyRating[]; +} + +// Warning: (ae-internal-missing-underscore) The name "GoogleAIGenerateContentResponse" should be prefixed with an underscore because the declaration is marked as @internal +// +// @internal (undocumented) +export interface GoogleAIGenerateContentResponse { + // (undocumented) + candidates?: GoogleAIGenerateContentCandidate[]; + // (undocumented) + promptFeedback?: PromptFeedback; + // (undocumented) + usageMetadata?: UsageMetadata; +} + // @public @deprecated (undocumented) export interface GroundingAttribution { // (undocumented) @@ -653,7 +705,6 @@ export const POSSIBLE_ROLES: readonly ["user", "model", "function", "system"]; export interface PromptFeedback { // (undocumented) blockReason?: BlockReason; - // (undocumented) blockReasonMessage?: string; // (undocumented) safetyRatings: SafetyRating[]; @@ -684,11 +735,8 @@ export interface SafetyRating { category: HarmCategory; // (undocumented) probability: HarmProbability; - // (undocumented) probabilityScore: number; - // (undocumented) severity: HarmSeverity; - // (undocumented) severityScore: number; } diff --git a/packages/vertexai/src/googleAIMappers.test.ts b/packages/vertexai/src/googleai-mappers.test.ts similarity index 94% rename from packages/vertexai/src/googleAIMappers.test.ts rename to packages/vertexai/src/googleai-mappers.test.ts index a30fcdc3e7b..9c3bb0f9241 100644 --- a/packages/vertexai/src/googleAIMappers.test.ts +++ b/packages/vertexai/src/googleai-mappers.test.ts @@ -24,7 +24,7 @@ import { mapGenerateContentRequest, mapGenerateContentResponse, mapPromptFeedback -} from './googleAIMappers'; +} from './googleai-mappers'; import { BlockReason, Content, @@ -44,7 +44,7 @@ import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest -} from './types/googleAI'; +} from './types/googleai'; import { logger } from './logger'; import { AIError } from './errors'; import { getMockResponse } from '../test-utils/mock-response'; @@ -281,7 +281,7 @@ describe('Google AI Mappers', () => { .undefined; // Not in Google AI }); - it('should add 
default safety rating properties and warn', () => { + it('should add default safety rating properties', () => { const candidates: GoogleAIGenerateContentCandidate[] = [ { index: 0, @@ -297,9 +297,6 @@ describe('Google AI Mappers', () => { } ]; const mapped = mapGenerateContentCandidates(candidates); - expect(loggerWarnStub).to.have.been.calledOnceWith( - "Candidate safety rating properties 'severity', 'severityScore', and 'probabilityScore' are not included in responses from Google AI. Properties have been assigned to default values." - ); expect(mapped[0].safetyRatings).to.exist; const safetyRating = mapped[0].safetyRatings?.[0] as SafetyRating; // Type assertion expect(safetyRating.severity).to.equal( @@ -359,7 +356,7 @@ describe('Google AI Mappers', () => { }); describe('mapPromptFeedback', () => { - it('should add default safety rating properties and warn', () => { + it('should add default safety rating properties', () => { const feedback: PromptFeedback = { blockReason: BlockReason.OTHER, safetyRatings: [ @@ -373,9 +370,6 @@ describe('Google AI Mappers', () => { // Missing blockReasonMessage }; const mapped = mapPromptFeedback(feedback); - expect(loggerWarnStub).to.have.been.calledOnceWith( - "PromptFeedback safety ratings' properties severity, severityScore, and probabilityScore are not included in responses from Google AI. Properties have been assigned to default values." - ); expect(mapped.safetyRatings).to.exist; const safetyRating = mapped.safetyRatings[0] as SafetyRating; // Type assertion expect(safetyRating.severity).to.equal( diff --git a/packages/vertexai/src/googleAIMappers.ts b/packages/vertexai/src/googleai-mappers.ts similarity index 90% rename from packages/vertexai/src/googleAIMappers.ts rename to packages/vertexai/src/googleai-mappers.ts index 290659f1f8d..ede91fe5756 100644 --- a/packages/vertexai/src/googleAIMappers.ts +++ b/packages/vertexai/src/googleai-mappers.ts @@ -33,7 +33,7 @@ import { GoogleAIGenerateContentResponse, GoogleAIGenerateContentCandidate, GoogleAICountTokensRequest -} from './types/googleAI'; +} from './types/googleai'; /** * This SDK supports both Vertex AI and Google AI APIs. @@ -159,17 +159,14 @@ export function mapGenerateContentCandidates( }; } - // Assign missing candidate SafetyRatings properties to their defaults. + // Assign missing candidate SafetyRatings properties to their defaults if undefined. if (candidate.safetyRatings) { - logger.warn( - "Candidate safety rating properties 'severity', 'severityScore', and 'probabilityScore' are not included in responses from Google AI. Properties have been assigned to default values." - ); mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { return { ...safetyRating, - severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED, - probabilityScore: 0, - severityScore: 0 + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0 }; }); } @@ -207,21 +204,18 @@ export function mapGenerateContentCandidates( export function mapPromptFeedback( promptFeedback: PromptFeedback ): PromptFeedback { - // Assign missing PromptFeedback SafetyRatings properties to their defaults. + // Assign missing SafetyRating properties to their defaults if undefined. 
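A worked example of the defaulting applied here, as a sketch with illustrative enum values: a Google AI rating that arrives without `severity`, `probabilityScore`, or `severityScore` is filled in before being surfaced as a Vertex-style `SafetyRating`.

import {
  HarmCategory,
  HarmProbability,
  HarmSeverity,
  SafetyRating
} from '@firebase/vertexai';

// Rating as it may arrive from the Google AI API (no severity or score fields).
const incoming = {
  category: HarmCategory.HARM_CATEGORY_HARASSMENT,
  probability: HarmProbability.LOW,
  blocked: false
};

// Shape produced by the mapping below when those fields are undefined.
const mapped: SafetyRating = {
  ...incoming,
  severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED,
  probabilityScore: 0,
  severityScore: 0
};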
const mappedSafetyRatings: SafetyRating[] = []; promptFeedback.safetyRatings.forEach(safetyRating => { mappedSafetyRatings.push({ category: safetyRating.category, probability: safetyRating.probability, - severity: HarmSeverity.HARM_SEVERITY_UNSUPPORTED, - probabilityScore: 0, - severityScore: 0, + severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + probabilityScore: safetyRating.probabilityScore ?? 0, + severityScore: safetyRating.severityScore ?? 0, blocked: safetyRating.blocked }); }); - logger.warn( - "PromptFeedback safety ratings' properties severity, severityScore, and probabilityScore are not included in responses from Google AI. Properties have been assigned to default values." - ); const mappedPromptFeedback: PromptFeedback = { blockReason: promptFeedback.blockReason, diff --git a/packages/vertexai/src/methods/count-tokens.test.ts b/packages/vertexai/src/methods/count-tokens.test.ts index 7ebaaf64683..7e04ddb3561 100644 --- a/packages/vertexai/src/methods/count-tokens.test.ts +++ b/packages/vertexai/src/methods/count-tokens.test.ts @@ -25,7 +25,7 @@ import { countTokens } from './count-tokens'; import { CountTokensRequest } from '../types'; import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; -import { mapCountTokensRequest } from '../googleAIMappers'; +import { mapCountTokensRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; use(sinonChai); diff --git a/packages/vertexai/src/methods/count-tokens.ts b/packages/vertexai/src/methods/count-tokens.ts index 3a94f181126..b1e60e3a182 100644 --- a/packages/vertexai/src/methods/count-tokens.ts +++ b/packages/vertexai/src/methods/count-tokens.ts @@ -22,7 +22,7 @@ import { } from '../types'; import { Task, makeRequest } from '../requests/request'; import { ApiSettings } from '../types/internal'; -import * as GoogleAIMapper from '../googleAIMappers'; +import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; export async function countTokens( diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index f796ef11676..13250fd83dd 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -32,7 +32,7 @@ import { import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; import { AIError } from '../api'; -import { mapGenerateContentRequest } from '../googleAIMappers'; +import { mapGenerateContentRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; use(sinonChai); diff --git a/packages/vertexai/src/methods/generate-content.ts b/packages/vertexai/src/methods/generate-content.ts index f05ca41c0bc..5f7902f5954 100644 --- a/packages/vertexai/src/methods/generate-content.ts +++ b/packages/vertexai/src/methods/generate-content.ts @@ -26,7 +26,7 @@ import { Task, makeRequest } from '../requests/request'; import { createEnhancedContentResponse } from '../requests/response-helpers'; import { processStream } from '../requests/stream-reader'; import { ApiSettings } from '../types/internal'; -import * as GoogleAIMapper from '../googleAIMappers'; +import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; export async function generateContentStream( @@ -66,7 +66,7 @@ export async function generateContent( JSON.stringify(params), 
requestOptions ); - const generateContentResponse = await handleGenerateContentResponse( + const generateContentResponse = await processGenerateContentResponse( response, apiSettings ); @@ -78,7 +78,7 @@ export async function generateContent( }; } -async function handleGenerateContentResponse( +async function processGenerateContentResponse( response: Response, apiSettings: ApiSettings ): Promise { diff --git a/packages/vertexai/src/public-types.ts b/packages/vertexai/src/public-types.ts index fb668a5b956..6d51c6d38c2 100644 --- a/packages/vertexai/src/public-types.ts +++ b/packages/vertexai/src/public-types.ts @@ -23,7 +23,7 @@ export * from './types'; /** * An instance of the Firebase AI SDK. * - * For more information, refer to the documentation for the new {@link AI}. + * For more information, refer to the documentation for the new {@link AI} interface. * * @public */ diff --git a/packages/vertexai/src/requests/stream-reader.ts b/packages/vertexai/src/requests/stream-reader.ts index 1b762947393..543d1d02266 100644 --- a/packages/vertexai/src/requests/stream-reader.ts +++ b/packages/vertexai/src/requests/stream-reader.ts @@ -25,8 +25,8 @@ import { } from '../types'; import { AIError } from '../errors'; import { createEnhancedContentResponse } from './response-helpers'; -import * as GoogleAIMapper from '../googleAIMappers'; -import { GoogleAIGenerateContentResponse } from '../types/googleAI'; +import * as GoogleAIMapper from '../googleai-mappers'; +import { GoogleAIGenerateContentResponse } from '../types/googleai'; import { ApiSettings } from '../types/internal'; import { BackendType } from '../public-types'; diff --git a/packages/vertexai/src/types/googleAI/index.ts b/packages/vertexai/src/types/googleAI/index.ts deleted file mode 100644 index 546c64f13b1..00000000000 --- a/packages/vertexai/src/types/googleAI/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -export * from './requests'; -export * from './responses'; diff --git a/packages/vertexai/src/types/googleAI/requests.ts b/packages/vertexai/src/types/googleAI/requests.ts deleted file mode 100644 index 94dfb4c4241..00000000000 --- a/packages/vertexai/src/types/googleAI/requests.ts +++ /dev/null @@ -1,29 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { Content, Part } from '../content'; -import { GenerationConfig, Tool } from '../requests'; - -export interface GoogleAICountTokensRequest { - generateContentRequest: { - model: string; // 'models/model-name' - contents: Content[]; - systemInstruction?: string | Part | Content; - tools?: Tool[]; - generationConfig?: GenerationConfig; - }; -} diff --git a/packages/vertexai/src/types/googleAI/responses.ts b/packages/vertexai/src/types/googleAI/responses.ts deleted file mode 100644 index 702464cecc7..00000000000 --- a/packages/vertexai/src/types/googleAI/responses.ts +++ /dev/null @@ -1,46 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import { Content } from '../content'; -import { FinishReason } from '../enums'; -import { - Citation, - GroundingMetadata, - PromptFeedback, - SafetyRating, - UsageMetadata -} from '../responses'; - -export interface GoogleAIGenerateContentResponse { - candidates?: GoogleAIGenerateContentCandidate[]; - promptFeedback?: PromptFeedback; - usageMetadata?: UsageMetadata; -} - -export interface GoogleAIGenerateContentCandidate { - index: number; - content: Content; - finishReason?: FinishReason; - finishMessage?: string; - safetyRatings?: SafetyRating[]; - citationMetadata?: GoogleAICitationMetadata; - groundingMetadata?: GroundingMetadata; -} - -export interface GoogleAICitationMetadata { - citationSources: Citation[]; // Maps to `citations` -} diff --git a/packages/vertexai/src/types/googleai.ts b/packages/vertexai/src/types/googleai.ts new file mode 100644 index 00000000000..c4a3580c9a2 --- /dev/null +++ b/packages/vertexai/src/types/googleai.ts @@ -0,0 +1,44 @@ +import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata } from '../public-types'; +import { Content, Part } from './content'; + +/** + * @internal + */ +export interface GoogleAICountTokensRequest { + generateContentRequest: { + model: string; // 'models/model-name' + contents: Content[]; + systemInstruction?: string | Part | Content; + tools?: Tool[]; + generationConfig?: GenerationConfig; + }; +} + +/** + * @internal + */ +export interface GoogleAIGenerateContentResponse { + candidates?: GoogleAIGenerateContentCandidate[]; + promptFeedback?: PromptFeedback; + usageMetadata?: UsageMetadata; +} + +/** + * @internal + */ +export interface GoogleAIGenerateContentCandidate { + index: number; + content: Content; + finishReason?: FinishReason; + finishMessage?: string; + safetyRatings?: SafetyRating[]; + citationMetadata?: GoogleAICitationMetadata; + groundingMetadata?: GroundingMetadata; +} + +/** + * @internal + */ +export interface GoogleAICitationMetadata { + citationSources: Citation[]; // Maps to `citations` +} \ No newline at end of file diff --git a/packages/vertexai/src/types/index.ts b/packages/vertexai/src/types/index.ts index f575c5ba8e9..01f3e7a701a 100644 --- a/packages/vertexai/src/types/index.ts +++ b/packages/vertexai/src/types/index.ts @@ 
-22,3 +22,4 @@ export * from './responses'; export * from './error'; export * from './schema'; export * from './imagen'; +export * from './googleai'; diff --git a/packages/vertexai/src/types/responses.ts b/packages/vertexai/src/types/responses.ts index 844b0b2934b..f8132533d66 100644 --- a/packages/vertexai/src/types/responses.ts +++ b/packages/vertexai/src/types/responses.ts @@ -108,7 +108,10 @@ export interface ModalityTokenCount { export interface PromptFeedback { blockReason?: BlockReason; safetyRatings: SafetyRating[]; - blockReasonMessage?: string; // This will always be undefined when using Google AI. + /** + * This field is unsupported in Google AI. + */ + blockReasonMessage?: string; } /** @@ -142,8 +145,14 @@ export interface Citation { endIndex?: number; uri?: string; license?: string; - title?: string; // This will always be undefined when using Google AI. - publicationDate?: Date; // This will always be undefined when using Google AI. + /** + * This field is not supported in Google AI. + */ + title?: string; + /** + * This field is not supported in Google AI. + */ + publicationDate?: Date; } /** @@ -212,10 +221,20 @@ export interface Date { export interface SafetyRating { category: HarmCategory; probability: HarmProbability; + /** + * This field is not supported in Google AI, so it will default to `HarmSeverity.UNSUPPORTED` + * when using Google AI. + */ severity: HarmSeverity; + /** + * This field is not supported in Google AI, so it will default to 0 when using Google AI. + */ probabilityScore: number; + /** + * This field is not supported in Google AI, so it will default to 0 when using Google AI. + */ severityScore: number; - blocked: boolean; // FIXME: This is only included when it's true. Either set a default of false, or make this optional. + blocked: boolean; } /** @@ -230,8 +249,10 @@ export interface CountTokensResponse { /** * The total number of billable characters counted across all instances * from the request. + * + * This field is not supported in Google AI, so it will default to 0 when using Google AI. */ - totalBillableCharacters?: number; // This will always be undefined when using Google AI. + totalBillableCharacters?: number; /** * The breakdown, by modality, of how many tokens are consumed by the prompt. */ From 48fc75c6e02578133e62c5f48cb2cf23175f0579 Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 22 Apr 2025 14:42:55 -0400 Subject: [PATCH 06/16] Format --- packages/vertexai/src/backend.test.ts | 29 +++++++++++++++++----- packages/vertexai/src/backend.ts | 23 ++++++++++++++--- packages/vertexai/src/googleai-mappers.ts | 3 ++- packages/vertexai/src/types/googleai.ts | 30 +++++++++++++++++++++-- packages/vertexai/src/types/responses.ts | 2 +- 5 files changed, 74 insertions(+), 13 deletions(-) diff --git a/packages/vertexai/src/backend.test.ts b/packages/vertexai/src/backend.test.ts index 837ba04519e..ec754bd41f0 100644 --- a/packages/vertexai/src/backend.test.ts +++ b/packages/vertexai/src/backend.test.ts @@ -1,11 +1,28 @@ -import { expect } from "chai"; -import { GoogleAIBackend, VertexAIBackend } from "./backend"; -import { BackendType } from "./public-types"; -import { DEFAULT_LOCATION } from "./constants"; +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { expect } from 'chai'; +import { GoogleAIBackend, VertexAIBackend } from './backend'; +import { BackendType } from './public-types'; +import { DEFAULT_LOCATION } from './constants'; describe('Backend', () => { describe('GoogleAIBackend', () => { - it('sets backendType to GOOGLE_AI', () => { + it('sets backendType to GOOGLE_AI', () => { const backend = new GoogleAIBackend(); expect(backend.backendType).to.equal(BackendType.GOOGLE_AI); }); @@ -32,4 +49,4 @@ describe('Backend', () => { expect(backend.location).to.equal(DEFAULT_LOCATION); }); }); -}); \ No newline at end of file +}); diff --git a/packages/vertexai/src/backend.ts b/packages/vertexai/src/backend.ts index 40f0884712a..cb80c158b8a 100644 --- a/packages/vertexai/src/backend.ts +++ b/packages/vertexai/src/backend.ts @@ -1,5 +1,22 @@ -import { DEFAULT_LOCATION } from "./constants"; -import { BackendType } from "./public-types"; +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { DEFAULT_LOCATION } from './constants'; +import { BackendType } from './public-types'; /** * Abstract base class representing the configuration for an AI service backend. @@ -69,4 +86,4 @@ export class VertexAIBackend extends Backend { this.location = location; } } -} \ No newline at end of file +} diff --git a/packages/vertexai/src/googleai-mappers.ts b/packages/vertexai/src/googleai-mappers.ts index ede91fe5756..405dbd8d8c9 100644 --- a/packages/vertexai/src/googleai-mappers.ts +++ b/packages/vertexai/src/googleai-mappers.ts @@ -164,7 +164,8 @@ export function mapGenerateContentCandidates( mappedSafetyRatings = candidate.safetyRatings.map(safetyRating => { return { ...safetyRating, - severity: safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, + severity: + safetyRating.severity ?? HarmSeverity.HARM_SEVERITY_UNSUPPORTED, probabilityScore: safetyRating.probabilityScore ?? 0, severityScore: safetyRating.severityScore ?? 0 }; diff --git a/packages/vertexai/src/types/googleai.ts b/packages/vertexai/src/types/googleai.ts index c4a3580c9a2..38c27b3fe8b 100644 --- a/packages/vertexai/src/types/googleai.ts +++ b/packages/vertexai/src/types/googleai.ts @@ -1,4 +1,30 @@ -import { Tool, GenerationConfig, Citation, FinishReason, GroundingMetadata, PromptFeedback, SafetyRating, UsageMetadata } from '../public-types'; +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { + Tool, + GenerationConfig, + Citation, + FinishReason, + GroundingMetadata, + PromptFeedback, + SafetyRating, + UsageMetadata +} from '../public-types'; import { Content, Part } from './content'; /** @@ -41,4 +67,4 @@ export interface GoogleAIGenerateContentCandidate { */ export interface GoogleAICitationMetadata { citationSources: Citation[]; // Maps to `citations` -} \ No newline at end of file +} diff --git a/packages/vertexai/src/types/responses.ts b/packages/vertexai/src/types/responses.ts index f8132533d66..1822d8ade84 100644 --- a/packages/vertexai/src/types/responses.ts +++ b/packages/vertexai/src/types/responses.ts @@ -249,7 +249,7 @@ export interface CountTokensResponse { /** * The total number of billable characters counted across all instances * from the request. - * + * * This field is not supported in Google AI, so it will default to 0 when using Google AI. */ totalBillableCharacters?: number; From cee1faefea990f32b976d5b15f5be354ca253dc2 Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Tue, 22 Apr 2025 15:29:35 -0400 Subject: [PATCH 07/16] Encode/decode instance identifiers directly to/from backends --- common/api-review/vertexai.api.md | 18 +++++++- packages/vertexai/src/api.ts | 34 +++------------ packages/vertexai/src/constants.ts | 6 --- packages/vertexai/src/helpers.test.ts | 46 +++++++-------------- packages/vertexai/src/helpers.ts | 50 +++++++++-------------- packages/vertexai/src/index.node.ts | 28 ++++++------- packages/vertexai/src/index.ts | 1 + packages/vertexai/src/requests/request.ts | 2 +- packages/vertexai/src/types/internal.ts | 8 +--- 9 files changed, 72 insertions(+), 121 deletions(-) diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index c4923bc52f3..1650bf3381e 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -12,7 +12,6 @@ import { FirebaseError } from '@firebase/util'; // @public export interface AI { app: FirebaseApp; - // Warning: (ae-forgotten-export) The symbol "Backend" needs to be exported by the entry point index.d.ts backend: Backend; // @deprecated location: string; @@ -75,6 +74,12 @@ export class ArraySchema extends Schema { toJSON(): SchemaRequest; } +// @public +export abstract class Backend { + protected constructor(type: BackendType); + readonly backendType: BackendType; +} + // @public export const BackendType: { readonly VERTEX_AI: "VERTEX_AI"; @@ -418,6 +423,11 @@ export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOp // @public export function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions): VertexAI; +// @public +export class GoogleAIBackend extends Backend { + constructor(); +} + // Warning: (ae-internal-missing-underscore) The name "GoogleAICitationMetadata" should be prefixed with an underscore because the declaration is marked as @internal // // @internal (undocumented) @@ -897,6 +907,12 @@ export interface UsageMetadata { // @public export type VertexAI = AI; +// @public +export class VertexAIBackend extends Backend { + constructor(location?: string); + readonly location: string; 
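For illustration, how these backend classes line up with the entry points changed below (a sketch assuming `getAI`, `getVertexAI`, `VertexAIBackend`, and `BackendType` are exported from the package entry point):

import { getApp } from '@firebase/app';
import { getAI, getVertexAI, VertexAIBackend, BackendType } from '@firebase/vertexai';

const app = getApp();

// Both calls resolve to the same provider identifier ('AI/vertexai/us-central1').
const viaGetAI = getAI(app, { backend: new VertexAIBackend('us-central1') });
const viaGetVertexAI = getVertexAI(app, { location: 'us-central1' });

// Omitting the location falls back to the default ('us-central1').
const backend = new VertexAIBackend();
console.log(backend.location, backend.backendType === BackendType.VERTEX_AI); // 'us-central1' true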
+} + // @public export const VertexAIError: typeof AIError; diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index b1eff5c7378..4f0c407e397 100644 --- a/packages/vertexai/src/api.ts +++ b/packages/vertexai/src/api.ts @@ -18,15 +18,9 @@ import { FirebaseApp, getApp, _getProvider } from '@firebase/app'; import { Provider } from '@firebase/component'; import { getModularInstance } from '@firebase/util'; -import { DEFAULT_LOCATION, AI_TYPE } from './constants'; +import { AI_TYPE } from './constants'; import { AIService } from './service'; -import { - BackendType, - AI, - AIOptions, - VertexAI, - VertexAIOptions -} from './public-types'; +import { AI, AIOptions, VertexAI, VertexAIOptions } from './public-types'; import { ImagenModelParams, ModelParams, @@ -42,6 +36,7 @@ export { ChatSession } from './methods/chat-session'; export * from './requests/schema-builder'; export { ImagenImageFormat } from './requests/imagen-image-format'; export { AIModel, GenerativeModel, ImagenModel, AIError }; +export { Backend, VertexAIBackend, GoogleAIBackend } from './backend'; export { AIErrorCode as VertexAIErrorCode }; @@ -86,10 +81,8 @@ export function getVertexAI( // Dependencies const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE); - const identifier = encodeInstanceIdentifier({ - backendType: BackendType.VERTEX_AI, - location: options?.location ?? DEFAULT_LOCATION - }); + const backend = new VertexAIBackend(options?.location); + const identifier = encodeInstanceIdentifier(backend); return AIProvider.getImmediate({ identifier }); @@ -131,22 +124,7 @@ export function getAI( // Dependencies const AIProvider: Provider<'AI'> = _getProvider(app, AI_TYPE); - let identifier: string; - if (options.backend instanceof GoogleAIBackend) { - identifier = encodeInstanceIdentifier({ - backendType: BackendType.GOOGLE_AI - }); - } else if (options.backend instanceof VertexAIBackend) { - identifier = encodeInstanceIdentifier({ - backendType: BackendType.VERTEX_AI, - location: options.backend.location ?? 
DEFAULT_LOCATION - }); - } else { - throw new AIError( - AIErrorCode.ERROR, - `Invalid backend type: ${options.backend.backendType}` - ); - } + const identifier = encodeInstanceIdentifier(options.backend); return AIProvider.getImmediate({ identifier }); diff --git a/packages/vertexai/src/constants.ts b/packages/vertexai/src/constants.ts index 8bb46222ddd..6339ce63017 100644 --- a/packages/vertexai/src/constants.ts +++ b/packages/vertexai/src/constants.ts @@ -16,18 +16,12 @@ */ import { version } from '../package.json'; -import { BackendType } from './public-types'; -import { InstanceIdentifier } from './types/internal'; // TODO (v12): Remove this export const VERTEX_TYPE = 'vertexAI'; export const AI_TYPE = 'AI'; -export const DEFAULT_INSTANCE_IDENTIFIER: InstanceIdentifier = { - backendType: BackendType.GOOGLE_AI -}; - export const DEFAULT_LOCATION = 'us-central1'; export const DEFAULT_BASE_URL = 'https://firebasevertexai.googleapis.com'; diff --git a/packages/vertexai/src/helpers.test.ts b/packages/vertexai/src/helpers.test.ts index 5fcf954ac37..533edf79e5f 100644 --- a/packages/vertexai/src/helpers.test.ts +++ b/packages/vertexai/src/helpers.test.ts @@ -18,52 +18,39 @@ import { expect } from 'chai'; import { AI_TYPE } from './constants'; import { encodeInstanceIdentifier, decodeInstanceIdentifier } from './helpers'; import { AIError } from './errors'; -import { BackendType } from './public-types'; -import { InstanceIdentifier } from './types/internal'; import { AIErrorCode } from './types'; +import { GoogleAIBackend, VertexAIBackend } from './backend'; describe('Identifier Encoding/Decoding', () => { describe('encodeInstanceIdentifier', () => { it('should encode Vertex AI identifier with a specific location', () => { - const identifier: InstanceIdentifier = { - backendType: BackendType.VERTEX_AI, - location: 'us-central1' - }; + const backend = new VertexAIBackend('us-central1'); const expected = `${AI_TYPE}/vertexai/us-central1`; - expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + expect(encodeInstanceIdentifier(backend)).to.equal(expected); }); it('should encode Vertex AI identifier using empty location', () => { - const identifier: InstanceIdentifier = { - backendType: BackendType.VERTEX_AI, - location: '' - }; + const backend = new VertexAIBackend(''); const expected = `${AI_TYPE}/vertexai/`; - expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + expect(encodeInstanceIdentifier(backend)).to.equal(expected); }); it('should encode Google AI identifier', () => { - const identifier: InstanceIdentifier = { - backendType: BackendType.GOOGLE_AI - }; + const backend = new GoogleAIBackend(); const expected = `${AI_TYPE}/googleai`; - expect(encodeInstanceIdentifier(identifier)).to.equal(expected); + expect(encodeInstanceIdentifier(backend)).to.equal(expected); }); it('should throw AIError for unknown backend type', () => { - const identifier = { - backendType: 'some-future-backend' - } as any; // bypass type checking for the test - - expect(() => encodeInstanceIdentifier(identifier)).to.throw(AIError); + expect(() => encodeInstanceIdentifier({} as any)).to.throw(AIError); try { - encodeInstanceIdentifier(identifier); + encodeInstanceIdentifier({} as any); expect.fail('Expected encodeInstanceIdentifier to throw'); } catch (e) { expect(e).to.be.instanceOf(AIError); const error = e as AIError; - expect(error.message).to.contain(`Unknown backend`); + expect(error.message).to.contain('Invalid backend'); expect(error.code).to.equal(AIErrorCode.ERROR); } }); @@ -72,11 
+59,8 @@ describe('Identifier Encoding/Decoding', () => { describe('decodeInstanceIdentifier', () => { it('should decode Vertex AI identifier with location', () => { const encoded = `${AI_TYPE}/vertexai/europe-west1`; - const expected: InstanceIdentifier = { - backendType: BackendType.VERTEX_AI, - location: 'europe-west1' - }; - expect(decodeInstanceIdentifier(encoded)).to.deep.equal(expected); + const backend = new VertexAIBackend('europe-west1'); + expect(decodeInstanceIdentifier(encoded)).to.deep.equal(backend); }); it('should throw an error if Vertex AI identifier string without explicit location part', () => { @@ -98,10 +82,8 @@ describe('Identifier Encoding/Decoding', () => { it('should decode Google AI identifier', () => { const encoded = `${AI_TYPE}/googleai`; - const expected: InstanceIdentifier = { - backendType: BackendType.GOOGLE_AI - }; - expect(decodeInstanceIdentifier(encoded)).to.deep.equal(expected); + const backend = new GoogleAIBackend(); + expect(decodeInstanceIdentifier(encoded)).to.deep.equal(backend); }); it('should throw AIError for invalid backend string', () => { diff --git a/packages/vertexai/src/helpers.ts b/packages/vertexai/src/helpers.ts index 764d06fe9f7..709bf4369c5 100644 --- a/packages/vertexai/src/helpers.ts +++ b/packages/vertexai/src/helpers.ts @@ -17,41 +17,34 @@ import { AI_TYPE } from './constants'; import { AIError } from './errors'; -import { BackendType } from './public-types'; -import { InstanceIdentifier } from './types/internal'; import { AIErrorCode } from './types'; +import { Backend, GoogleAIBackend, VertexAIBackend } from './backend'; /** - * Encodes an {@link InstanceIdentifier} into a string. - * - * This string is used to identify unique {@link AI} instances by backend type. + * Encodes a {@link Backend} into a string that will be used to uniquely identify {@link AI} + * instances by backend type. * * @internal */ -export function encodeInstanceIdentifier( - instanceIdentifier: InstanceIdentifier -): string { - switch (instanceIdentifier.backendType) { - case BackendType.VERTEX_AI: - return `${AI_TYPE}/vertexai/${instanceIdentifier.location}`; - case BackendType.GOOGLE_AI: - return `${AI_TYPE}/googleai`; - default: - throw new AIError( - AIErrorCode.ERROR, - `Unknown backend '${instanceIdentifier}'` - ); +export function encodeInstanceIdentifier(backend: Backend): string { + if (backend instanceof GoogleAIBackend) { + return `${AI_TYPE}/googleai`; + } else if (backend instanceof VertexAIBackend) { + return `${AI_TYPE}/vertexai/${backend.location}`; + } else { + throw new AIError( + AIErrorCode.ERROR, + `Invalid backend: ${JSON.stringify(backend.backendType)}` + ); } } /** - * Decodes an instance identifier string into an {@link InstanceIdentifier}. + * Decodes an instance identifier string into a {@link Backend}. 
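For illustration, the round trip these helpers implement, using the identifier format asserted in the tests above (a sketch; both helpers are internal to the package, so the relative imports are only meaningful from inside it):

import { GoogleAIBackend, VertexAIBackend } from './backend';
import { encodeInstanceIdentifier, decodeInstanceIdentifier } from './helpers';

const vertexId = encodeInstanceIdentifier(new VertexAIBackend('us-central1')); // 'AI/vertexai/us-central1'
const googleId = encodeInstanceIdentifier(new GoogleAIBackend());              // 'AI/googleai'

const decodedVertex = decodeInstanceIdentifier('AI/vertexai/europe-west1');    // VertexAIBackend with location 'europe-west1'
const decodedGoogle = decodeInstanceIdentifier(googleId);                      // GoogleAIBackend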
* * @internal */ -export function decodeInstanceIdentifier( - instanceIdentifier: string -): InstanceIdentifier { +export function decodeInstanceIdentifier(instanceIdentifier: string): Backend { const identifierParts = instanceIdentifier.split('/'); if (identifierParts[0] !== AI_TYPE) { throw new AIError( @@ -59,8 +52,8 @@ export function decodeInstanceIdentifier( `Invalid instance identifier, unknown prefix '${identifierParts[0]}'` ); } - const backend = identifierParts[1]; - switch (backend) { + const backendType = identifierParts[1]; + switch (backendType) { case 'vertexai': const location: string | undefined = identifierParts[2]; if (!location) { @@ -69,14 +62,9 @@ export function decodeInstanceIdentifier( `Invalid instance identifier, unknown location '${instanceIdentifier}'` ); } - return { - backendType: BackendType.VERTEX_AI, - location - }; + return new VertexAIBackend(location); case 'googleai': - return { - backendType: BackendType.GOOGLE_AI - }; + return new GoogleAIBackend(); default: throw new AIError( AIErrorCode.ERROR, diff --git a/packages/vertexai/src/index.node.ts b/packages/vertexai/src/index.node.ts index 60ed127c19d..1908e65b1cd 100644 --- a/packages/vertexai/src/index.node.ts +++ b/packages/vertexai/src/index.node.ts @@ -23,33 +23,31 @@ import { registerVersion, _registerComponent } from '@firebase/app'; import { AIService } from './service'; -import { DEFAULT_INSTANCE_IDENTIFIER, AI_TYPE } from './constants'; +import { AI_TYPE } from './constants'; import { Component, ComponentType } from '@firebase/component'; import { name, version } from '../package.json'; -import { InstanceIdentifier } from './types/internal'; import { decodeInstanceIdentifier } from './helpers'; +import { AIError } from './errors'; +import { AIErrorCode } from './public-types'; function registerAI(): void { _registerComponent( new Component( AI_TYPE, - (container, options) => { - // getImmediate for FirebaseApp will always succeed - const app = container.getProvider('app').getImmediate(); - const auth = container.getProvider('auth-internal'); - const appCheckProvider = container.getProvider('app-check-internal'); - - let instanceIdentifier: InstanceIdentifier; - if (options.instanceIdentifier) { - instanceIdentifier = decodeInstanceIdentifier( - options.instanceIdentifier + (container, { instanceIdentifier }) => { + if (!instanceIdentifier) { + throw new AIError( + AIErrorCode.ERROR, + 'AIService instance identifier is undefined.' 
); - } else { - instanceIdentifier = DEFAULT_INSTANCE_IDENTIFIER; } - const backend = instanceIdentifier; + const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed + const app = container.getProvider('app').getImmediate(); + const auth = container.getProvider('auth-internal'); + const appCheckProvider = container.getProvider('app-check-internal'); return new AIService(app, backend, auth, appCheckProvider); }, ComponentType.PUBLIC diff --git a/packages/vertexai/src/index.ts b/packages/vertexai/src/index.ts index 2f275e9d903..8451d68bbf0 100644 --- a/packages/vertexai/src/index.ts +++ b/packages/vertexai/src/index.ts @@ -49,6 +49,7 @@ function registerAI(): void { } const backend = decodeInstanceIdentifier(instanceIdentifier); + // getImmediate for FirebaseApp will always succeed const app = container.getProvider('app').getImmediate(); const auth = container.getProvider('auth-internal'); diff --git a/packages/vertexai/src/requests/request.ts b/packages/vertexai/src/requests/request.ts index 7ff51d71e3d..31c5e9b8125 100644 --- a/packages/vertexai/src/requests/request.ts +++ b/packages/vertexai/src/requests/request.ts @@ -66,7 +66,7 @@ export class RequestUrl { } else { throw new AIError( AIErrorCode.ERROR, - `Invalid backend: ${this.apiSettings.backend}` + `Invalid backend: ${JSON.stringify(this.apiSettings.backend)}` ); } } diff --git a/packages/vertexai/src/types/internal.ts b/packages/vertexai/src/types/internal.ts index 4dde95d520e..a41ec5652d3 100644 --- a/packages/vertexai/src/types/internal.ts +++ b/packages/vertexai/src/types/internal.ts @@ -18,7 +18,6 @@ import { AppCheckTokenResult } from '@firebase/app-check-interop-types'; import { FirebaseAuthTokenData } from '@firebase/auth-interop-types'; import { Backend } from '../backend'; -import { BackendType } from '../public-types'; export * from './imagen/internal'; @@ -28,15 +27,10 @@ export interface ApiSettings { appId: string; automaticDataCollectionEnabled?: boolean; /** - * @deprecated + * @deprecated Use `backend.location` instead. */ location: string; backend: Backend; getAuthToken?: () => Promise; getAppCheckToken?: () => Promise; } - -export interface InstanceIdentifier { - backendType: BackendType; - location?: string; -} From 475c81a735a30221326688b9d5081d83183eb660 Mon Sep 17 00:00:00 2001 From: Daniel La Rocque Date: Wed, 23 Apr 2025 11:19:30 -0400 Subject: [PATCH 08/16] Update changeset --- .changeset/tall-zoos-stare.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changeset/tall-zoos-stare.md b/.changeset/tall-zoos-stare.md index f4e7601b4a4..f6bbe141228 100644 --- a/.changeset/tall-zoos-stare.md +++ b/.changeset/tall-zoos-stare.md @@ -3,4 +3,4 @@ '@firebase/vertexai': minor --- -Add support for the Google AI API, enabling usage in a free tier, and migrate from `VertexAI` naming to `GenAI`. +Add support for the Google AI API, enabling usage in a free tier, and add new `AI` API to accomodate new product naming. From abf0491aef48cd6e02d25732debbbdb9259c326a Mon Sep 17 00:00:00 2001 From: Pavan Shankar Date: Wed, 16 Apr 2025 10:06:31 +0530 Subject: [PATCH 09/16] Complete Hybrid inference impl Fix languageCode parameter in action_code_url (#8912) * Fix languageCode parameter in action_code_url * Add changeset Vaihi add langmodel types. (#8927) * Adding LanguageModel types. These are based off https://github.com/webmachinelearning/prompt-api?tab=readme-ov-file#full-api-surface-in-web-idl * Adding LanguageModel types. 
* Remove bunch of exports * yarn formatted * after lint Define HybridParams (#8935) Co-authored-by: Erik Eldridge Adding smoke test for new hybrid params (#8937) * Adding smoke test for new hybrid params * Use the existing name of the model params input --------- Co-authored-by: Erik Eldridge Moving to in-cloud naming (#8938) Co-authored-by: Erik Eldridge Moving to string type for the inference mode (#8941) Define ChromeAdapter class (#8942) Co-authored-by: Erik Eldridge VinF Hybrid Inference: Implement ChromeAdapter (rebased) (#8943) Adding count token impl (#8950) VinF Hybrid Inference #4: ChromeAdapter in stream methods (rebased) (#8949) Define values for Availability enum (#8951) VinF Hybrid Inference: narrow Chrome input type (#8953) Add image inference support (#8954) * Adding image based input for inference * adding image as input to create language model object disable count tokens api for on-device inference (#8962) VinF Hybrid Inference: throw if only_on_device and model is unavailable (#8965) --- e2e/sample-apps/modular.js | 36 +- packages/vertexai/src/api.test.ts | 15 + packages/vertexai/src/api.ts | 29 +- .../vertexai/src/methods/chat-session.test.ts | 19 +- packages/vertexai/src/methods/chat-session.ts | 4 + .../src/methods/chrome-adapter.test.ts | 473 ++++++++++++++++++ .../vertexai/src/methods/chrome-adapter.ts | 287 +++++++++++ .../vertexai/src/methods/count-tokens.test.ts | 44 +- packages/vertexai/src/methods/count-tokens.ts | 17 +- .../src/methods/generate-content.test.ts | 66 ++- .../vertexai/src/methods/generate-content.ts | 53 +- .../src/models/generative-model.test.ts | 147 ++++-- .../vertexai/src/models/generative-model.ts | 16 +- packages/vertexai/src/types/language-model.ts | 82 +++ packages/vertexai/src/types/requests.ts | 27 + 15 files changed, 1214 insertions(+), 101 deletions(-) create mode 100644 packages/vertexai/src/methods/chrome-adapter.test.ts create mode 100644 packages/vertexai/src/methods/chrome-adapter.ts create mode 100644 packages/vertexai/src/types/language-model.ts diff --git a/e2e/sample-apps/modular.js b/e2e/sample-apps/modular.js index 9e943e04494..aeebe19a4b1 100644 --- a/e2e/sample-apps/modular.js +++ b/e2e/sample-apps/modular.js @@ -58,7 +58,7 @@ import { onValue, off } from 'firebase/database'; -import { getGenerativeModel, getVertexAI, VertexAI } from 'firebase/vertexai'; +import { getGenerativeModel, getVertexAI } from 'firebase/vertexai'; import { getDataConnect, DataConnect } from 'firebase/data-connect'; /** @@ -313,9 +313,15 @@ function callPerformance(app) { async function callVertexAI(app) { console.log('[VERTEXAI] start'); const vertexAI = getVertexAI(app); - const model = getGenerativeModel(vertexAI, { model: 'gemini-1.5-flash' }); - const result = await model.countTokens('abcdefg'); - console.log(`[VERTEXAI] counted tokens: ${result.totalTokens}`); + const model = getGenerativeModel(vertexAI, { + mode: 'only_on_device' + }); + const singleResult = await model.generateContent([ + { text: 'describe the following:' }, + { text: 'the mojave desert' } + ]); + console.log(`Generated text: ${singleResult.response.text()}`); + console.log(`[VERTEXAI] end`); } /** @@ -341,18 +347,18 @@ async function main() { const app = initializeApp(config); setLogLevel('warn'); - callAppCheck(app); - await authLogin(app); - await callStorage(app); - await callFirestore(app); - await callDatabase(app); - await callMessaging(app); - callAnalytics(app); - callPerformance(app); - await callFunctions(app); + // callAppCheck(app); + // await authLogin(app); 
+ // await callStorage(app); + // await callFirestore(app); + // await callDatabase(app); + // await callMessaging(app); + // callAnalytics(app); + // callPerformance(app); + // await callFunctions(app); await callVertexAI(app); - callDataConnect(app); - await authLogout(app); + // callDataConnect(app); + // await authLogout(app); console.log('DONE'); } diff --git a/packages/vertexai/src/api.test.ts b/packages/vertexai/src/api.test.ts index 0554ff46441..9d21dedb14e 100644 --- a/packages/vertexai/src/api.test.ts +++ b/packages/vertexai/src/api.test.ts @@ -101,6 +101,21 @@ describe('Top level API', () => { expect(genModel).to.be.an.instanceOf(GenerativeModel); expect(genModel.model).to.equal('publishers/google/models/my-model'); }); + it('getGenerativeModel with HybridParams sets a default model', () => { + const genModel = getGenerativeModel(fakeAI, { + mode: 'only_on_device' + }); + expect(genModel.model).to.equal( + `publishers/google/models/${GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL}` + ); + }); + it('getGenerativeModel with HybridParams honors a model override', () => { + const genModel = getGenerativeModel(fakeAI, { + mode: 'prefer_on_device', + inCloudParams: { model: 'my-model' } + }); + expect(genModel.model).to.equal('publishers/google/models/my-model'); + }); it('getImagenModel throws if no model is provided', () => { try { getImagenModel(fakeAI, {} as ImagenModelParams); diff --git a/packages/vertexai/src/api.ts b/packages/vertexai/src/api.ts index 4f0c407e397..1da5914682d 100644 --- a/packages/vertexai/src/api.ts +++ b/packages/vertexai/src/api.ts @@ -23,6 +23,7 @@ import { AIService } from './service'; import { AI, AIOptions, VertexAI, VertexAIOptions } from './public-types'; import { ImagenModelParams, + HybridParams, ModelParams, RequestOptions, AIErrorCode @@ -31,6 +32,8 @@ import { AIError } from './errors'; import { AIModel, GenerativeModel, ImagenModel } from './models'; import { encodeInstanceIdentifier } from './helpers'; import { GoogleAIBackend, VertexAIBackend } from './backend'; +import { ChromeAdapter } from './methods/chrome-adapter'; +import { LanguageModel } from './types/language-model'; export { ChatSession } from './methods/chat-session'; export * from './requests/schema-builder'; @@ -138,16 +141,36 @@ export function getAI( */ export function getGenerativeModel( ai: AI, - modelParams: ModelParams, + modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions ): GenerativeModel { - if (!modelParams.model) { + // Uses the existence of HybridParams.mode to clarify the type of the modelParams input. + const hybridParams = modelParams as HybridParams; + let inCloudParams: ModelParams; + if (hybridParams.mode) { + inCloudParams = hybridParams.inCloudParams || { + model: GenerativeModel.DEFAULT_HYBRID_IN_CLOUD_MODEL + }; + } else { + inCloudParams = modelParams as ModelParams; + } + + if (!inCloudParams.model) { throw new AIError( AIErrorCode.NO_MODEL, `Must provide a model name. 
Example: getGenerativeModel({ model: 'my-model-name' })` ); } - return new GenerativeModel(ai, modelParams, requestOptions); + return new GenerativeModel( + ai, + inCloudParams, + new ChromeAdapter( + window.LanguageModel as LanguageModel, + hybridParams.mode, + hybridParams.onDeviceParams + ), + requestOptions + ); } /** diff --git a/packages/vertexai/src/methods/chat-session.test.ts b/packages/vertexai/src/methods/chat-session.test.ts index 0564aa84ed6..ed0b4d4877f 100644 --- a/packages/vertexai/src/methods/chat-session.test.ts +++ b/packages/vertexai/src/methods/chat-session.test.ts @@ -24,6 +24,7 @@ import { GenerateContentStreamResult } from '../types'; import { ChatSession } from './chat-session'; import { ApiSettings } from '../types/internal'; import { VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -46,7 +47,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContent' ).rejects('generateContent failed'); - const chatSession = new ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await expect(chatSession.sendMessage('hello')).to.be.rejected; expect(generateContentStub).to.be.calledWith( fakeApiSettings, @@ -63,7 +68,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContentStream' ).rejects('generateContentStream failed'); - const chatSession = new ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await expect(chatSession.sendMessageStream('hello')).to.be.rejected; expect(generateContentStreamStub).to.be.calledWith( fakeApiSettings, @@ -82,7 +91,11 @@ describe('ChatSession', () => { generateContentMethods, 'generateContentStream' ).resolves({} as unknown as GenerateContentStreamResult); - const chatSession = new ChatSession(fakeApiSettings, 'a-model'); + const chatSession = new ChatSession( + fakeApiSettings, + 'a-model', + new ChromeAdapter() + ); await chatSession.sendMessageStream('hello'); expect(generateContentStreamStub).to.be.calledWith( fakeApiSettings, diff --git a/packages/vertexai/src/methods/chat-session.ts b/packages/vertexai/src/methods/chat-session.ts index 60794001e37..112ddf5857e 100644 --- a/packages/vertexai/src/methods/chat-session.ts +++ b/packages/vertexai/src/methods/chat-session.ts @@ -30,6 +30,7 @@ import { validateChatHistory } from './chat-session-helpers'; import { generateContent, generateContentStream } from './generate-content'; import { ApiSettings } from '../types/internal'; import { logger } from '../logger'; +import { ChromeAdapter } from './chrome-adapter'; /** * Do not log a message for this error. 
@@ -50,6 +51,7 @@ export class ChatSession { constructor( apiSettings: ApiSettings, public model: string, + private chromeAdapter: ChromeAdapter, public params?: StartChatParams, public requestOptions?: RequestOptions ) { @@ -95,6 +97,7 @@ export class ChatSession { this._apiSettings, this.model, generateContentRequest, + this.chromeAdapter, this.requestOptions ) ) @@ -146,6 +149,7 @@ export class ChatSession { this._apiSettings, this.model, generateContentRequest, + this.chromeAdapter, this.requestOptions ); diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts new file mode 100644 index 00000000000..859a02a4e85 --- /dev/null +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -0,0 +1,473 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { AIError } from '../errors'; +import { expect, use } from 'chai'; +import sinonChai from 'sinon-chai'; +import chaiAsPromised from 'chai-as-promised'; +import { ChromeAdapter } from './chrome-adapter'; +import { + Availability, + LanguageModel, + LanguageModelCreateOptions, + LanguageModelMessageContent +} from '../types/language-model'; +import { match, stub } from 'sinon'; +import { GenerateContentRequest, AIErrorCode } from '../types'; + +use(sinonChai); +use(chaiAsPromised); + +/** + * Converts the ReadableStream from response.body to an array of strings. 
+ */ +async function toStringArray( + stream: ReadableStream +): Promise { + const decoder = new TextDecoder(); + const actual = []; + const reader = stream.getReader(); + while (true) { + const { done, value } = await reader.read(); + if (done) { + break; + } + actual.push(decoder.decode(value)); + } + return actual; +} + +describe('ChromeAdapter', () => { + describe('isAvailable', () => { + it('returns false if mode is only cloud', async () => { + const adapter = new ChromeAdapter(undefined, 'only_in_cloud'); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if LanguageModel API is undefined', async () => { + const adapter = new ChromeAdapter(undefined, 'prefer_on_device'); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if request contents empty', async () => { + const adapter = new ChromeAdapter( + { + availability: async () => Availability.available + } as LanguageModel, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [] + }) + ).to.be.false; + }); + it('returns false if request content has function role', async () => { + const adapter = new ChromeAdapter( + { + availability: async () => Availability.available + } as LanguageModel, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [ + { + role: 'function', + parts: [] + } + ] + }) + ).to.be.false; + }); + it('returns true if model is readily available', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.available) + } as LanguageModel; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.true; + }); + it('returns false and triggers download when model is available after download', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + {} as LanguageModel + ); + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.false; + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + }); + it('avoids redundant downloads', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + const downloadPromise = new Promise(() => { + /* never resolves */ + }); + const createStub = stub(languageModelProvider, 'create').returns( + downloadPromise + ); + const adapter = new ChromeAdapter(languageModelProvider); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + expect(createStub).to.have.been.calledOnce; + }); + it('clears state when download completes', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.downloadable), + create: () => Promise.resolve({}) + } as LanguageModel; + let resolveDownload; + const downloadPromise = new Promise(resolveCallback => { + 
resolveDownload = resolveCallback; + }); + const createStub = stub(languageModelProvider, 'create').returns( + downloadPromise + ); + const adapter = new ChromeAdapter(languageModelProvider); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + resolveDownload!(); + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }); + expect(createStub).to.have.been.calledTwice; + }); + it('returns false when model is never available', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.unavailable), + create: () => Promise.resolve({}) + } as LanguageModel; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + expect( + await adapter.isAvailable({ + contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + }) + ).to.be.false; + }); + }); + describe('generateContent', () => { + it('throws if Chrome API is undefined', async () => { + const adapter = new ChromeAdapter(undefined, 'only_on_device'); + await expect( + adapter.generateContent({ + contents: [] + }) + ) + .to.eventually.be.rejectedWith( + AIError, + 'Chrome AI requested for unsupported browser version.' + ) + .and.have.property('code', AIErrorCode.REQUEST_ERROR); + }); + it('generates content', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('') + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const promptOutput = 'hi'; + const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); + const onDeviceParams = { + systemPrompt: 'be yourself' + } as LanguageModelCreateOptions; + const expectedOnDeviceParams = { + systemPrompt: 'be yourself', + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + onDeviceParams + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 'anything' }] }] + } as GenerateContentRequest; + const response = await adapter.generateContent(request); + // Asserts initialization params are proxied. + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + // Asserts Vertex input type is mapped to Chrome type. + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ]); + // Asserts expected output. 
+ expect(await response.json()).to.deep.equal({ + candidates: [ + { + content: { + parts: [{ text: promptOutput }] + } + } + ] + }); + }); + it('generates content using image type input', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('') + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const promptOutput = 'hi'; + const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); + const onDeviceParams = { + systemPrompt: 'be yourself' + } as LanguageModelCreateOptions; + const expectedOnDeviceParams = { + systemPrompt: 'be yourself', + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + onDeviceParams + ); + const request = { + contents: [ + { + role: 'user', + parts: [ + { text: 'anything' }, + { + inlineData: { + data: sampleBase64EncodedImage, + mimeType: 'image/jpeg' + } + } + ] + } + ] + } as GenerateContentRequest; + const response = await adapter.generateContent(request); + // Asserts initialization params are proxied. + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + // Asserts Vertex input type is mapped to Chrome type. + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + }, + { + type: 'image', + content: match.instanceOf(ImageBitmap) + } + ]); + // Asserts expected output. + expect(await response.json()).to.deep.equal({ + candidates: [ + { + content: { + parts: [{ text: promptOutput }] + } + } + ] + }); + }); + }); + describe('countTokens', () => { + it('counts tokens is not yet available', async () => { + const inputText = 'first'; + // setting up stubs + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + measureInputUsage: _i => Promise.resolve(123) + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + + const countTokenRequest = { + contents: [{ role: 'user', parts: [{ text: inputText }] }] + } as GenerateContentRequest; + + try { + await adapter.countTokens(countTokenRequest); + } catch (e) { + // the call to countToken should be rejected with Error + expect((e as AIError).code).to.equal(AIErrorCode.REQUEST_ERROR); + expect((e as AIError).message).includes('not yet available'); + } + + // Asserts that no language model was initialized + expect(createStub).not.called; + }); + }); + describe('generateContentStream', () => { + it('generates content stream', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + promptStreaming: _i => new ReadableStream() + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const part = 'hi'; + const promptStub = stub(languageModel, 'promptStreaming').returns( + new ReadableStream({ + start(controller) { + controller.enqueue([part]); + controller.close(); + } + }) + ); + const onDeviceParams = {} as LanguageModelCreateOptions; + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as 
LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + onDeviceParams + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 'anything' }] }] + } as GenerateContentRequest; + const response = await adapter.generateContentStream(request); + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ]); + const actual = await toStringArray(response.body!); + expect(actual).to.deep.equal([ + `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n` + ]); + }); + it('generates content stream with image input', async () => { + const languageModelProvider = { + create: () => Promise.resolve({}) + } as LanguageModel; + const languageModel = { + promptStreaming: _i => new ReadableStream() + } as LanguageModel; + const createStub = stub(languageModelProvider, 'create').resolves( + languageModel + ); + const part = 'hi'; + const promptStub = stub(languageModel, 'promptStreaming').returns( + new ReadableStream({ + start(controller) { + controller.enqueue([part]); + controller.close(); + } + }) + ); + const onDeviceParams = {} as LanguageModelCreateOptions; + const expectedOnDeviceParams = { + expectedInputs: [{ type: 'image' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + onDeviceParams + ); + const request = { + contents: [ + { + role: 'user', + parts: [ + { text: 'anything' }, + { + inlineData: { + data: sampleBase64EncodedImage, + mimeType: 'image/jpeg' + } + } + ] + } + ] + } as GenerateContentRequest; + const response = await adapter.generateContentStream(request); + expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(promptStub).to.have.been.calledOnceWith([ + { + type: 'text', + content: request.contents[0].parts[0].text + }, + { + type: 'image', + content: match.instanceOf(ImageBitmap) + } + ]); + const actual = await toStringArray(response.body!); + expect(actual).to.deep.equal([ + `data: {"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n` + ]); + }); + }); +}); + +// TODO: Move to using image from test-utils. 
+const sampleBase64EncodedImage = + '/9j/4QDeRXhpZgAASUkqAAgAAAAGABIBAwABAAAAAQAAABoBBQABAAAAVgAAABsBBQABAAAAXgAAACgBAwABAAAAAgAAABMCAwABAAAAAQAAAGmHBAABAAAAZgAAAAAAAABIAAAAAQAAAEgAAAABAAAABwAAkAcABAAAADAyMTABkQcABAAAAAECAwCGkgcAFgAAAMAAAAAAoAcABAAAADAxMDABoAMAAQAAAP//AAACoAQAAQAAAMgAAAADoAQAAQAAACwBAAAAAAAAQVNDSUkAAABQaWNzdW0gSUQ6IDM5MP/bAEMACAYGBwYFCAcHBwkJCAoMFA0MCwsMGRITDxQdGh8eHRocHCAkLicgIiwjHBwoNyksMDE0NDQfJzk9ODI8LjM0Mv/bAEMBCQkJDAsMGA0NGDIhHCEyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMv/CABEIASwAyAMBIgACEQEDEQH/xAAbAAABBQEBAAAAAAAAAAAAAAAAAQIDBAUGB//EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/9oADAMBAAIQAxAAAAHfA7ZFFgBQAAUUBQFBFABSUBQBQBZQUiqC7wAoigooQKACgCigKIoAosIKSigABWBdZAUAUAUQUUUAFIBQAWAFAUVFABSKoLqAKAKAKJVt4BvrFLAqKooArHgoQAoKiqDyKKoaiqhSqhCqgLFKHKdBiZmbodX5n2MbWHkdZS2kWhUBQIVUBwgUucv8Oad7nUzey3vPO5q4UrlOEWjzT0vhssDpea9Gy03BsqooKhCgCgCgHIcd0fN5DnuWHseY0Ureh+ZelLIqFq+f+gQJ5f6V5r6pE4i2ioDhCFVAVWrCiBxvJdlzFzVc56GjFoy4/a8d2q2TmpN3V1OF2MWp1/NrL0hzinRnO5Sdwc+L0Jz5HQLzyy9AYQYmDrZfXkyxVs5m4yVt3F0/M7l1YotpQnScdumqsFSb0yElm4zf5hjvV56bOtteViXq3ecRMbJgG+L4tzGqNyTDJNqMx5rfSHGRdpAcidPqLyFbuBeWrdmyONg7TJTBTrqZg3b6GGzbSzILYW8uSuF2hPG9l6uFdbPQRxzU8M2Lc62fpUJZNGC5TXAseNuVc2abO0pSKUsjdI+OdNoTzYc3fIANzF1LVTalK9KU72e1coa1TOqe3naA8inKGZ0QV5ZGzSywKWVrSAUROTjuno8lSLQbFq5kNrXsYAvQu5xmW9y18l0tjmrFu8ZM66C0nLabEsPGrT3xOlnIyXjkzC8tSxh2zRbWlsVNZtY6a9SKq1ZCd0rLHS17SPlgUtvpvatrVetlYJJZRpNcOOfmRaEN+s3Vctl0qCWs+PLljs19iWw+RdZEcU1VBFVUR6Kr5a6rplEzvnH5krF9Y33LnNFkqWIynAqZ3Zno3U03xO1mVY1HrGDxgOREpURkjiMXDUXOlsVpjRIJ0RXhix3KbUuzn6DLla6nK1RwFAKKK+GNsuigXReXW6mpRS2yWu6Zgr64Rq90abqclllYVJiJxIrAkI1JXRvJZoJJqUcY1yzmrvLnMLJX1QngWQrF9hTW01IZmwlt1F5bWtMTPruLc+fYltSVo83SKpnX/8QALRAAAQQCAQMDBAIBBQAAAAAAAQACAwQREgUQExQgITAVIjEyI0AkJTM0QXD/2gAIAQEAAQUC/wDH5Z2wu/scrHmBjg+P0hzXf0pGCSPjpnwT2bDa0LOWe6dEgCW06yYIWwRf0uVrbNdf79Grg2ZeUrxkMsco+CFleP4uRuyQvPITOjdyLzS4yy+Znqts7dtcbSZOgAB8V6Yw1nlziCE39obclR8EzZ4YrUM7vRy2PLVBpbT+Plv+Nn0RPZU42jJpc9HIwOhtqk8yU/j5dxMq+1YbrVaH2eUd/lsDpJG516zRMnjLSHRt0i+PlYss613Fli5OLBhOkwv1ShNG4PlDIqdzyunjd/l/k5NwFWu0dw/gMLlXhfFyHLD+SpGZbTq8GIR3Y7NCGKvRrd9fT5F4VgLxboXZ5ALXkgs8mFZt3I5vIvLzLYXnzL6lhfVYwvq9dfVqy5IEpzTG93618me0P9S5T96GPNQDWm+f8HifZuVlZWVlZXJnPILKysoytXsuUe0y27LHxzS92Y/ca72xzmWOW1cMcklSSKIMkbIzzYNrs8b6dO1HXYLsBaHAqS0yOTKyvLb37crZOQm5Bkcw5GFykuyqZ81iJ0mru9JgJ8bmHoGly1ds+KSNMikkXZsAduVo+5HKBwmW5mFzy5z70r43WJXEyuKz9ywjs8wzSQPdkuwUAcch/u9InavA0s2maqnMYpC1rmtjAV1zvHpVi1hiiQghz4cC8SsnUqxX0+svDrix9KgzLxeHHiiG/SX4+lyI8ZMFLVmgFz9nY2UELioNnqSRz5KEa/6AUpe0Miyrf8Dadnug6uQwOjgSyKye+WyIbAEgLuRoSxORwVLU2tTyOfJj2QlkY3ua8dGN0MhO2LmkK3bkgn7Ykjk4+KQ14BXj67YNkydqtE/VahagLVqwFo3f0PHlwe4NOSWRrh7agqxUEyZmGF9+IKG/G53Q7YPfaou9amEzV+wAI9BkY0k5PWtHOwy1d3V4zC38oKaq6WQfiw+FrIIqxXutiPRlfatWLVi0YvZTU4bDnVV4zkKpRrvUbS1F3tG4hbhbhbhS2WxtmmM0nHt0gysrZZWfR7rPXKysrZbFblblbruFZ990Nc7BCYpsxXdXcWy2WyysrPXuxrvMK7sa1ytF212120RqMZGFhY6BAoFArZZWVlZWfTC1zi+0c15y9+q1WgT4F33KOUl+0a7jMtfl2PTn4K+S0xPDoIe2srKyrE2vSGPuP7LF22/EEFq5dtybDlMAYMrZbLdOsgJ7t3KJj4xn4crK2QkKDgfTnpMThmNU1jXMbNogc/DlZWVno1+FsAvz6H5x0/KhZ7/GR0wgPd7tjD1x0f8Auoxs/wCHCwtemOuUx4ag8FZHV8bcqu33+LKysArt5WpWq1WOmShIQnSZBTBs4eyz1z8AKygvZaharC1RYsdQcESLcL8rJWVn0Z6gdG9MrKys9CAUWLtuWvUEhCRbDp7rZbLKCCygvx6s9AUCisBYRCPTKyUPQ0ooOKBK/8QAIhEAAwACAgIBBQAAAAAAAAAAAAEREBIgIQIwURMiMUBQ/9oACAEDAQE/Af5k9E9yWITC9S7RCCIQhCEGuyEcPFMTYrCYsxTrDYmVQTKhPouPJ9GyNj6iG7mEIRkZGPxZGR8aTofiRkZGM6OjY/OahNFp38lZWX5NkXxPtxuzZlNjZm5ubmxc01RqakIak4XhSl9NJxf6cJxvNCxCelMp/8QAIhEAAwACAgIBBQAAAAAAAAAAAAERECASMAIhIjFAQVBx/9oACAECAQE/Af1d6LumXZs5MTLhn51pR5WlKUulz5JLFLrR/XH8ITEIQhCCHld3IbRUesez2Px0jI8PERxIz5
HyPZxRxWkIQmvI5FLil6Z137C9NJ2XFL0MhD//xAA2EAABAwEFBQcDBAEFAAAAAAABAAIRIQMQEjFBEyAiMlEEMDNSYXGRQIGhIzRCklAUQ1Nwcv/aAAgBAQAGPwL/AKfYHfyMfUttf+M1TXNyIpvHCQY+icw5OEI9ktdKBbR3sAmjZDZkxnW6TQI2HZK+a00CDG/Ri3Zm3mjonWNtGMZOTJgCdTCIaS8+ixOOCyCDLMU7sWVnQxJKaHEyMy2kqWyLSYxJwtHS5u/atiOK5z7USGmIQAHdktMONAsTnEn1WQKnojgjCdE21FAUW2b5I3aHStzZ1r3jP/d5uDbV1XyWgKzrAy3Xn+L+IXWTj5e8s2aRN2SOhVm1woXLDo1oQazmOSGLOK7hY9shYdckxvQDvGWvQxuMeBiIOSbNjs36kpjvKZXihSHhOfnhE0TuDDHrdaECGMdLu9w6khYncrBiKlBozJhWTHiHAqyd6Qms+VJsmfCwhh9k97C8EDqn/quZHlVO2Wi4e2OVO2KnamrxbIr/AGimi0OA9GL9qFXsZVeyPVezWirY2qq20H2Wbv6qy+E5hzFEFZgecKwI1Vh91bOGmV1B6K1Vr9t9vsN3mCqAm7N7SOjdE0NqQZTrTrc1ztCrJ4PC3VWDcQnF+FbvLhzfhYmmicMfKuF04skQ+eI6LFtBms0xhNXH4v2MVWIHhELCDiGvoqHWE6rWwadUHTJb5dQuE16ojaEjOt0OEX0ErDBk6IF7YnqjgYTGcLw3wpwOj2WqqFTNE4qnOViJWCaR0VXnKKKr/wAKTfJMlTEjVsolZXNoAIzRuBmEHWwaGnJzRRbTZ8PnCLZaGn0WS5KrCLM1WK0xD0OS8Jhn0RH+nZ/VeC1eC1eEFyflYHWsTkAuZ/yoZaf2Xij7hTtW/YLnb+Vzs+VLsvRybaEV6SjhENu2kNwN8yfbFoMcrf4p1o9pwikTQIl1nXQkXVXCGhYiYJ8rl+4tGTlAR5nR/IthQVS4j4WztHEnQlgVLX5YtFUwvFHyqWjflcy2r3WZZ5SjifiAyXpdha8hvRCGzwprA0kzWEABT3XCQPcKpCwsIy6IY/xRTjeD7ysAM+u5ov07LaHoVithx9JyvoB8LIfCyU7Ie+60sPG3MXHEeEZIVr7qoaUDQP6obR0x0CptPhBhDhN9Ci9xDoya0IutHusmt/iFBIXDakey8QlZ31c0fdTuY2wAeqxC0OI5yoxk+l+MWpb6XfrAV0WOyAprcOAn23ch8LLcxPxfK4XfKzCqVkhxqhquMrNZrNTzegWM0U6uP00rJThF2ar3WfdSPo5mAFDcuqwu3JYYN3EQAuZRKw4e+e3QhYYWI825hGt0aLJZd5kslxKBu5IuN2hnvc+4gIzdzQVhNfX6CqpuZX0VR39d83D6ckG7F/kafT0/xf8A/8QAKhABAAIBAwMDBAIDAQAAAAAAAQARITFBURBhcSCBkTChscHR8EBQ4fH/2gAIAQEAAT8h/wAiv8iof60/24fSvm0naH+R2aUdppQR8PVerRTWafXUA+lrvlRRsJt2f+xcK5o6rMHN0LZb9Fagaq0EyEPYezzAGwavL67l+jb1sex1ucH2lNKQvo1+4DXUq1qO8JQuOPmZPNWNPbllNUa93l+m+Nx3niXqZkfLEtIvwwS75Bt1qXL9H43mjIKjs5hxLIxhtWEwAKAMH07uBuNpYwtVXCGs7xLQcmZjdZmpBJoLnaFJ1hXpOcFSE2YaxxFP5/qcz+iXToFmTpK7yt+RC1GWVyrPaHXZjILVX8kNe0A+l+w+psg/PfTViLG0CD8QCO8wRgYDiC7aYcs8evd6Brtt3jBCFweZUJVb7fUI7W74YEcS8LFVhJzjk4dy8SodQh3BdmyEXRzd7TFspRGYByYeUzF14jPPEuXLly5cuX1voJWze2sQ9Q9zg+amaprCQ2IEoCSuY63Ir4MUahd+BmIVIZuUJECnsXWXLxBDX26+XmU6Xz/7B6iXK05n8hGGqPmbfyP/ACbwnQ2SxsPmU6p4Z+gVlGn8XL6L7f8AJtJ7Q/KUi17sMo5YxypaCW4JWPpGGnmOw2v8iFmYsfKLYjkdZeDFDDg0nxh+YLPL+3rAovb+8vPUvzA65saxNfuiJo4RLXF13F2lmFXuvaKkPabIc4ZYEFrumMtNnH9E5U7Xd/MEFXvNB7FuMe0c02mB3mVhstCBhU0/pNAtCaNTXRMJW6svWpfUs6vbSB84N+NZSDuiCsttdle72mPNFBy4gHLLvAbbzAzStbf3M1+rqfeaZZioic9GqZcBKxw6mYehtWyxgJ6A0l8UrYI2w+TpmbVfCc8e01A7G4Am8NmW9XzxHqqqOF68w02AWwwaR0UXXYymRduZhOHzFc3L8ydyHa660DiXiJbc7qbQ68TJeQN5lUp3IxjxlldJXAGhvzGQDjQla/mO1nlbX8SpaWtplxI3wfuMXhYM1gea6UwzwhqIoFb6IX3dfboerh4s/c7Ku7jYbcZBKfAP4hEIvg/xCqWcYJrnusF0L2ilrPtY/UeCdwsCgzQq1kzPaNZXE8vB0QuFCtP2R/SzWKmP5lZq66aINj8zdH3JY2L3b/EUWNVZT7SgKpYEv6iCaNkipsd5QBFfMK7/ADLhKuriEWio7PmWrwcAzdF4xALHlbKs4Z1wsK+kLuRnGtlWvBMmobbEsBvLa4Ra2bGWPmIdgfeWyhbQxMealG6ViFVJbmACj/e8MOBdG1M5KoWzlPfQP2TdqXYgVMbhBCOIfJjqCjWwEDunsDxEaxiLGc+YGofiC6/tph0fEbq08FzOOphG5asjVVFSkYRPapngwWxcu0vBdTFabfWF2AxjqRcMdpCHIuhjHRaq1shjR+YLyRaBfeDFw3B95hI3XGcc98n5iGQXeCM9ykB5sGtyXMwjvSacC9j0UgA0epLcxoY1vwIuGsVEyJgECgfuUxBo3SqX0bqmOle5Fwz9XSSp7y5TclPW+DjyysaQ2D7yoIZQUVASNWtGaMDyJZG1bMueKBkF4emONKdQe8fmlpZKmGwDaCjdRVzyl+r5RZctlwODPeW5l5eWnej0a07kyste7Cuz4iOp+IbRXiF0fvmcLfaBgGB59RCuYRi1grWpmq3zACxuMsW4ipmHSFCF5eEAxPoFO6HfPOX6g+h0Hr241UgcciUSu9EJR2iYsUkpMCjTWLHiCiA7Cd0TDl5ljaUzMJfQMGEBfQvMZ3mqnuQnZf4ej09wdMswMrA4BbDfiY6VK6VAgQ6e2d5Ei4qWqn5s+itCbuWLqhlWkq2LKEXLOty5cvqlICFMPQZcHouVl00QXXQwuRGdtTZDAmnruX12bcwwxnnJGlohhFSuj0Ybtvo6KU/mKNxw06XL6X6UuLMxjxEbIUS+eOldNT7zpWodT1r8S0So9Fsy1mBrWLawbfpjeawPRVbNOteu6hB2RJpKbpkjKiWOgWj0pKSXuUpKCg6bJfRcuX1GX0CxLzOdyKnhMtou0sa9L5JmoXcg2sE0PQOcoy+lstCp7dIO81QWXhJAJh0Zhme2l
G0EaxxLeickGmHRljeW3gYGMiJWUqDT0rLS24nU3GkrAgLhBQ5orOopHhhHWKMs/9oADAMBAAIAAwAAABASIMVBgAVIggAJsGy6fNBiyj4Y5ptsnyTbFtvCz9pNNPGuqMCNo42YQIEExL6CRYMEGT8YCBzUGdVEHKQHraFgCRaW/wDNpnycuGNdceiyLtY4mcgOiOu29EEGuHlAnRrvBwEb0uqOJE43dRwqzkz2egbGwwUOslkwzPIcsSwSNhRUkWEw1v62L+JMcNPr2AmjywACL2YgqfCuq0/Cz+/jqnaGEcefx1OE4WV4cia8oyMQ8U8lMsIgsWO//8QAHREAAwACAwEBAAAAAAAAAAAAAAERECEgMVFBMP/aAAgBAwEBPxBc1+a/BIhCcITMI8QhCYQhCEJkvMQmYQhMwSNeZGhNUhCEIQb2JLs6VO48HoK5+AEVawVlRxOosomXwd8GnZFXhBRoo6jcWhEUOTSFpEsbUKcC6hquh+Q9qiTHo2Gy+i7hlYQVKEyMkG6xMadEsQVNWsKSdaxKa3svsSIaTUmSLsaJEyxoR7dxN2w294KG1dcCJhIQvQkXwVG3IpKLNtFFEf038E3ME6JsbQ4LKEhtzEIQgmkJBlpkEt46D4xkZcREF0PMJiix8T5k1yH+A//EAB4RAAMBAQADAQEBAAAAAAAAAAABERAhIDFBMFFh/9oACAECAQE/EPwf5PaPLlKXwo8u0pSlHxtGUpcdGmMo/RWlC6rOhZS5zhwLrp0UmC+CpFGXTp0aFzo0Khvgvd8QpR+8Uo8UY3hhO7WUKvQfs9qhB/Q1cMLofRRZwoyLzYIjmNwtyoqx5BNoX9YkbbejnwfUEgxiqXWPwCf4cfBQoKFzOCBKesbMOHCLwvBFnCFFE4bIRBUylKUqIyEEGxKimUpcjwmijeLKUuVFHlekUospdpk/Fii0nkmn/8QAJhABAAICAgICAgIDAQAAAAAAAQARITFBURBhcYGRobHBINHw4f/aAAgBAQABPxDweDX+J4P8jfk14NeVQJUNf4G/J4NeKleKh4JQyvDDwHipXivFQJUJUrxUrxUDuVK8ceArxUJUqVA8HioeK8VAzKglSoVUqVDLKhiV4rzUCoFwxKlSpXgPBAuVK8VKrwF+K8VApm5UCV4rxmVCVA81KlngPAY8V4qV1L8DfCB7N8RCCVTnDfgMeK8G5UJXgPJhh5NeefBszFrbCQytzUeUao/D74+vBr/AgAyf4TDfk8BC0HvMPJrzz5Du/sDX4afqAmGh09Z6tZ8y6HhnL0DxVZuAzNHW4FtX6iIo7J/LlggsaQei6lY9npH/AFNo2ptfvweTUuoeUhnWfias6ur9zmvJvwbOtJ6ixUpjK35UfuXT0sbc6a5cGnnUL5mcCXrzLchY3eC3HuH3Uh0/D9mofTOTtN9iw35PBr/Ac8U7vqA+qD5uBejEvV1kHSBKE5R22G1rFxXpUFJYPmYeA58heEtci8c45jURYWjAr6YsPtTBr6p1QtXvZiUhnAA9EqG/BL8GvF+HPAhZtt/Ep6IEFjWWXZEyZxhjcAsIVY6kJuM7G4jJYFaxpL6xBJXdgs7L3DZCXPuskrndJk1KfdVNat1CRLa/LF/QQxLhuX4PA/4VRxeHLBSZcWf99S27qvcugnIGo2dXu2sS82b2g/GU/MunLN0XKR9RXnZipcJeTeMnCR4FO+1/In8VEYLeinvEoIwVXoGXnxcJcGpfi/Fy21LB7I/QfuXRjHXqK8gK5zKKcge5qpOkLtH81MXGMwG1V9/qBRMNPJuMY1SJ6Zg5lwzDEepTJTCOyvUSXhBnJM/khigpQ1Qv9+L8DDEuGZcuXLmJy595j8JEMc8nuC1NlOYZQwYgoYo0vrHxDJYqMeAChgzKA1gouBzr1iKCjyip+TcPydMB03LYrV5B7uOogpwsP/EaDsTkPzzK6RwxgYYzbLC2ZleUPuA7/crA3mse/AtMIMvwuKgIR/JSndEl3GvmUJdIWrx7blVdY7bq36i1x4YU2iJHJpkW20V/ZNdWx0Fv1REywUgayt8QlCxGmUPVal73duXYUnWY+VQ5Vkvp1Ag0hWzxDsCsXKtreYa0/wDbifph/wDkpH0qKek5slT+CIaofwlXT1a/9MP+GH5h/wB0PqaXb0oftGVjP1D/ALmeGP0e9zIIYbq2kjuNCnKUn9MAvw3aQZgIXxSv8XKN2Iv0f+yWSW7IOyCu8DX+CATBIHSMWMyI3ofUAs5L8mJc6D+IMN6h7ePz/cKYvEpSSoVxhPc7rmPMHW38zcW1eWqOWAiW1MVH4jixHSNPq63CEMEwbVAtddYleJbjRl+6qUt1UOMD8x6hdbNH3OdTEKNn3uYnWIotw22VL6i1l282Y3BCipGSWhRzahznsOD76iAbC4lVV25rqG3MRWFkeviCur66Mct/MICcbEf7V7ghVYEpzTpqFMewB7H7lg2lxHBUByqDApdpbLOHlsg7m7CgEPbvqc3VboZs7UcmYEolD8gcGV/UE4ubQVrDspUiXl23DrBwRa6lX2IrB2HTqLvOkKi3pemJetOKgvvC7GOIgruagHj22wp4akoviWsDVT8BmYYyWD9LnBBXAfoYpCBtFdrgibPAo/mGxbGKaEFBQIhVs1BrbVCoYrPUGI40OBqpS3BgF9lwUjdg5be4fSpbgAbN6lmQ2Jw5hzC5q1qIuyH3/uYsKtqcFEDqLQa8BadkDjGVt7gxY52EBmfsodOLYW6TiLZmtcnpllt3zKfRULQeUNkDIQVQ9Ff5lSnC/dWRunxDrAWE/T/CKLUlTl81iG04NeTdNFhBjiqVjdUX+Suos14DB3m7/UOlfVaPshiMBuGIXw1mWaer/wCkSLT+T/2Jf936ilV+I/7iREraYdFtsuA2+RGbJMKx8lJYIdJ/YV/UCVpV0n+iYILiy/qU5FqApirNIF6v1dxZbfwGYPzAryVXA85iHAPqGrsbZbeqMsKUJysHNv7I/FtkKAdFZwOIWOYw1Zsbz+IgC2um/lhhRL7yfqGKZ7xXaBmJzVNxbsY+KgZZbSfOFX3AboByDpRcx0HPYk/gIWAGjp9wJXC+oGmdIVbhE/uPyjmUfUb9WRDCBz+3CRAtrtSX6iStHACJ00uQJG30oN/zKAObBH5ghoDQbNAZh0hYGwesRpxTYNn3M8XUvGTdAbhRDqWQ5RfxLD8hS2NZ0IWX0ypT1Yqgdo3KBm0HyWMsIkDDQv7QutMrDgjS9trKAWqfiVhQ0OEdVHLE4pVKutai4IfbcRaHwVMBT9kIKi7Mv43KuOoPkbgk66BXXANRgEnuq/qUdpdmQ/1HgPoCBsd/B+poNfRSMQzT7Vxof3CgoFBxqV1DBEmURG919Ra5zFyNa+O4EC9qA4O+YLAIWyXNPMVlScBr5qcc8llH2wMABLUvYO/cGGRtbVwVnqYQBQ1/lg49ExPtDEHJvqC8nyxGE4ZV9wS4xFo6tbFUaFKj1/b+ojAGFMH1RhzbxQv7shIe6Av4JyvmEsVZAvISkembc1pl36c0Hmqz+5Vy
gUUjd0R6OEhZTwJxHTZzQpPUpWRUKrftCMsCANFcymG0C8uqmp7kBXsgC3pZW4zFwW+kJkYmEfZbK8MpBpD8za0H5LYpgE5HmLL4S6a/E4AHRiLberLAAIU3doNi6JaY16Kl3gMYQQpHqXCTGK7iiHAEfctwAMl1ACDZGZIjAHhP9gmxYd0uZuDgbf8AyJllcAPVzMwCAqjBDDZgm385nymeL8C93FMbMMoyZIXZLu/zBTUZr2mXdxLcTNsaNvzO1Ms51/cA1T5ifvUIfUIUCO6GYMBDWH8SyIsutf4gQfGEPKHVDNpOYIr0gO7gJRge4B5I+k+5R4RBU1OiEBXdSdBaaYgwASymJ0xOmNu0DxLy8HMxgR5IdcC4IhiA9koep6SYdwzbCrCJ8qWgo3cHRiW6i1t8uplil/Gm+EDlhl7+IQriMAIlZgIkN1wwlhiFNqmbEbag5Z+WVoNtRWRiYR/HxADMInphBTljsbtmU1Z/gbzMPSuJWSeADDBlpK9R844ZlatMdyuLdW9S1tSrb3KFEVL9Eq0s0bgUsaYAOAPipUv1LmagX4Lwxu4kjlTQJqPVKbt6jpQ8BuZKUtrtcE6f3BHMwzcvFNF7iaBOiwmzwsOjqWBytSlBIVYSImoGtQTiAMqnDiEA6geoV4hhglzidqIWLEpFPq4I5H7lBiHJntZbuDhMI21AlSVV7uN2K5gwnXtqV7OxsqN3aLINwxATklvqX8RQiHuNdXFDzHOdDEsiibDDMuKdysqyYxKoqwgiWhZDUs7auJaGZbGLNcNRmwMZ4mIAqoKcwvLy3uWlstiyyDpAe40mHDcNKMM4mrBo9Rql+0o0V4q6xLhQY9w1j6eBRspuziNNtwcwblPH35CF9ZnqSnZHWZbiUjAm7j7cIfkQo4s4nLrTcUFojCAm0WJlBumAvA0YCENztcMQS5Y+BCDbCzczZgiXYl6wgbC/MM1MTBZNUS1kgJOBItSqTRheZaluO2c2/Ex/A6gOYM4Z8LlvH4wctYPgKMrrNz0kaSFfBcQMbTjNkVebSsAZEYVpqUXFUIMTOEVEzSZaSS9QXSoEwwdZSWPNSnWYcxGiy1hd7QEtxE6VC8oBhFOZbOXuCXgQz1JRZhEsa8GAimGoqB4BcGhixA8DEQc3Fc1LW7gsweg3Lo024ah5Q0wDmHMZ3IicQl3RmGShHATpwWJEjhZUcytCWLOYRDCktgtnuAFhmYO5vRP/2Q=='; diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts new file mode 100644 index 00000000000..63e1db83e89 --- /dev/null +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -0,0 +1,287 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { AIError } from '../errors'; +import { + CountTokensRequest, + GenerateContentRequest, + InferenceMode, + Part, + AIErrorCode +} from '../types'; +import { + Availability, + LanguageModel, + LanguageModelCreateOptions, + LanguageModelMessageContent +} from '../types/language-model'; + +/** + * Defines an inference "backend" that uses Chrome's on-device model, + * and encapsulates logic for detecting when on-device is possible. + */ +export class ChromeAdapter { + private isDownloading = false; + private downloadPromise: Promise | undefined; + private oldSession: LanguageModel | undefined; + constructor( + private languageModelProvider?: LanguageModel, + private mode?: InferenceMode, + private onDeviceParams?: LanguageModelCreateOptions + ) {} + + /** + * Checks if a given request can be made on-device. + * + *
Encapsulates a few concerns:
+   *   1. the mode
+   *   2. API existence
+   *   3. prompt formatting
+   *   4. model availability, including triggering download if necessary
+   *
+   * Pros: callers needn't be concerned with details of on-device availability.
+   * Cons: this method spans a few concerns and splits request validation from usage.
+   * If instance variables weren't already part of the API, we could consider a better
+   * separation of concerns.
+ */ + async isAvailable(request: GenerateContentRequest): Promise { + if (this.mode === 'only_in_cloud') { + return false; + } + + const availability = await this.languageModelProvider?.availability(); + + // Triggers async model download so it'll be available next time. + if (availability === Availability.downloadable) { + this.download(); + } + + if (this.mode === 'only_on_device') { + return true; + } + + // Applies prefer_on_device logic. + return ( + availability === Availability.available && + ChromeAdapter.isOnDeviceRequest(request) + ); + } + + /** + * Generates content on device. + * + *

This is comparable to {@link GenerativeModel.generateContent} for generating content in
+   * Cloud.
+ * @param request a standard Vertex {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContent(request: GenerateContentRequest): Promise { + const session = await this.createSession( + // TODO: normalize on-device params during construction. + this.onDeviceParams || {} + ); + // TODO: support multiple content objects when Chrome supports + // sequence + const contents = await Promise.all( + request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) + ); + const text = await session.prompt(contents); + return ChromeAdapter.toResponse(text); + } + + /** + * Generates content stream on device. + * + *

This is comparable to {@link GenerativeModel.generateContentStream} for generating content in
+   * Cloud.
+ * @param request a standard Vertex {@link GenerateContentRequest} + * @returns {@link Response}, so we can reuse common response formatting. + */ + async generateContentStream( + request: GenerateContentRequest + ): Promise { + const session = await this.createSession( + // TODO: normalize on-device params during construction. + this.onDeviceParams || {} + ); + // TODO: support multiple content objects when Chrome supports + // sequence + const contents = await Promise.all( + request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) + ); + const stream = await session.promptStreaming(contents); + return ChromeAdapter.toStreamResponse(stream); + } + + async countTokens(_request: CountTokensRequest): Promise { + throw new AIError( + AIErrorCode.REQUEST_ERROR, + 'Count Tokens is not yet available for on-device model.' + ); + } + + /** + * Asserts inference for the given request can be performed by an on-device model. + */ + private static isOnDeviceRequest(request: GenerateContentRequest): boolean { + // Returns false if the prompt is empty. + if (request.contents.length === 0) { + return false; + } + + // Applies the same checks as above, but for each content item. + for (const content of request.contents) { + if (content.role === 'function') { + return false; + } + } + + return true; + } + + /** + * Triggers the download of an on-device model. + * + *

Chrome only downloads models as needed. Chrome knows a model is needed when code calls
+   * LanguageModel.create.
+   *
+   * Since Chrome manages the download, the SDK can only avoid redundant download requests by
+   * tracking if a download has previously been requested.
+ */ + private download(): void { + if (this.isDownloading) { + return; + } + this.isDownloading = true; + const options = this.onDeviceParams || {}; + ChromeAdapter.addImageTypeAsExpectedInput(options); + this.downloadPromise = this.languageModelProvider + ?.create(options) + .then(() => { + this.isDownloading = false; + }); + } + + /** + * Converts a Vertex Part object to a Chrome LanguageModelMessageContent object. + */ + private static async toLanguageModelMessageContent( + part: Part + ): Promise { + if (part.text) { + return { + type: 'text', + content: part.text + }; + } else if (part.inlineData) { + const formattedImageContent = await fetch( + `data:${part.inlineData.mimeType};base64,${part.inlineData.data}` + ); + const imageBlob = await formattedImageContent.blob(); + const imageBitmap = await createImageBitmap(imageBlob); + return { + type: 'image', + content: imageBitmap + }; + } + // Assumes contents have been verified to contain only a single TextPart. + // TODO: support other input types + throw new Error('Not yet implemented'); + } + + /** + * Abstracts Chrome session creation. + * + *

Chrome uses a multi-turn session for all inference. Vertex uses single-turn for all
+   * inference. To map the Vertex API to Chrome's API, the SDK creates a new session for all
+   * inference.
+   *
+   * Chrome will remove a model from memory if it's no longer in use, so this method ensures a
+   * new session is created before an old session is destroyed.
+ */ + private async createSession( + // TODO: define a default value, since these are optional. + options: LanguageModelCreateOptions + ): Promise { + if (!this.languageModelProvider) { + throw new AIError( + AIErrorCode.REQUEST_ERROR, + 'Chrome AI requested for unsupported browser version.' + ); + } + // TODO: could we use this.onDeviceParams instead of passing in options? + ChromeAdapter.addImageTypeAsExpectedInput(options); + const newSession = await this.languageModelProvider!.create(options); + if (this.oldSession) { + this.oldSession.destroy(); + } + // Holds session reference, so model isn't unloaded from memory. + this.oldSession = newSession; + return newSession; + } + + private static addImageTypeAsExpectedInput( + options: LanguageModelCreateOptions + ): void { + options.expectedInputs = options.expectedInputs || []; + options.expectedInputs.push({ type: 'image' }); + } + + /** + * Formats string returned by Chrome as a {@link Response} returned by Vertex. + */ + private static toResponse(text: string): Response { + return { + json: async () => ({ + candidates: [ + { + content: { + parts: [{ text }] + } + } + ] + }) + } as Response; + } + + /** + * Formats string stream returned by Chrome as SSE returned by Vertex. + */ + private static toStreamResponse(stream: ReadableStream): Response { + const encoder = new TextEncoder(); + return { + body: stream.pipeThrough( + new TransformStream({ + transform(chunk, controller) { + const json = JSON.stringify({ + candidates: [ + { + content: { + role: 'model', + parts: [{ text: chunk }] + } + } + ] + }); + controller.enqueue(encoder.encode(`data: ${json}\n\n`)); + } + }) + ) + } as Response; + } +} diff --git a/packages/vertexai/src/methods/count-tokens.test.ts b/packages/vertexai/src/methods/count-tokens.test.ts index 7e04ddb3561..78c51d3f5b7 100644 --- a/packages/vertexai/src/methods/count-tokens.test.ts +++ b/packages/vertexai/src/methods/count-tokens.test.ts @@ -27,6 +27,7 @@ import { ApiSettings } from '../types/internal'; import { Task } from '../requests/request'; import { mapCountTokensRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -66,7 +67,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.totalTokens).to.equal(6); expect(result.totalBillableCharacters).to.equal(16); @@ -92,7 +94,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.totalTokens).to.equal(1837); expect(result.totalBillableCharacters).to.equal(117); @@ -120,7 +123,8 @@ describe('countTokens()', () => { const result = await countTokens( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.totalTokens).to.equal(258); expect(result).to.not.have.property('totalBillableCharacters'); @@ -146,7 +150,12 @@ describe('countTokens()', () => { json: mockResponse.json } as Response); await expect( - countTokens(fakeApiSettings, 'model', fakeRequestParams) + countTokens( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith(/404.*not found/); expect(mockFetch).to.be.called; }); @@ -164,7 +173,12 @@ describe('countTokens()', () => { it('maps request to GoogleAI format', async () => { 
makeRequestStub.resolves({ ok: true, json: () => {} } as Response); // Unused - await countTokens(fakeGoogleAIApiSettings, 'model', fakeRequestParams); + await countTokens( + fakeGoogleAIApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ); expect(makeRequestStub).to.be.calledWith( 'model', @@ -176,4 +190,24 @@ describe('countTokens()', () => { ); }); }); + it('on-device', async () => { + const chromeAdapter = new ChromeAdapter(); + const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true); + const mockResponse = getMockResponse( + 'vertexAI', + 'unary-success-total-tokens.json' + ); + const countTokensStub = stub(chromeAdapter, 'countTokens').resolves( + mockResponse as Response + ); + const result = await countTokens( + fakeApiSettings, + 'model', + fakeRequestParams, + chromeAdapter + ); + expect(result.totalTokens).eq(6); + expect(isAvailableStub).to.be.called; + expect(countTokensStub).to.be.calledWith(fakeRequestParams); + }); }); diff --git a/packages/vertexai/src/methods/count-tokens.ts b/packages/vertexai/src/methods/count-tokens.ts index b1e60e3a182..81fb3ad061d 100644 --- a/packages/vertexai/src/methods/count-tokens.ts +++ b/packages/vertexai/src/methods/count-tokens.ts @@ -24,8 +24,9 @@ import { Task, makeRequest } from '../requests/request'; import { ApiSettings } from '../types/internal'; import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; +import { ChromeAdapter } from './chrome-adapter'; -export async function countTokens( +export async function countTokensOnCloud( apiSettings: ApiSettings, model: string, params: CountTokensRequest, @@ -48,3 +49,17 @@ export async function countTokens( ); return response.json(); } + +export async function countTokens( + apiSettings: ApiSettings, + model: string, + params: CountTokensRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + if (await chromeAdapter.isAvailable(params)) { + return (await chromeAdapter.countTokens(params)).json(); + } + + return countTokensOnCloud(apiSettings, model, params, requestOptions); +} diff --git a/packages/vertexai/src/methods/generate-content.test.ts b/packages/vertexai/src/methods/generate-content.test.ts index 13250fd83dd..16a48f473ad 100644 --- a/packages/vertexai/src/methods/generate-content.test.ts +++ b/packages/vertexai/src/methods/generate-content.test.ts @@ -34,6 +34,7 @@ import { Task } from '../requests/request'; import { AIError } from '../api'; import { mapGenerateContentRequest } from '../googleai-mappers'; import { GoogleAIBackend, VertexAIBackend } from '../backend'; +import { ChromeAdapter } from './chrome-adapter'; use(sinonChai); use(chaiAsPromised); @@ -96,7 +97,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Mountain View, California'); expect(makeRequestStub).to.be.calledWith( @@ -119,7 +121,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Use Freshly Ground Coffee'); expect(result.response.text()).to.include('30 minutes of brewing'); @@ -142,7 +145,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); 
expect(result.response.usageMetadata?.totalTokenCount).to.equal(1913); expect(result.response.usageMetadata?.candidatesTokenCount).to.equal(76); @@ -177,7 +181,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include( 'Some information cited from an external source' @@ -204,7 +209,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text).to.throw('SAFETY'); expect(makeRequestStub).to.be.calledWith( @@ -226,7 +232,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text).to.throw('SAFETY'); expect(makeRequestStub).to.be.calledWith( @@ -248,7 +255,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.equal(''); expect(makeRequestStub).to.be.calledWith( @@ -270,7 +278,8 @@ describe('generateContent()', () => { const result = await generateContent( fakeApiSettings, 'model', - fakeRequestParams + fakeRequestParams, + new ChromeAdapter() ); expect(result.response.text()).to.include('Some text'); expect(makeRequestStub).to.be.calledWith( @@ -292,7 +301,12 @@ describe('generateContent()', () => { json: mockResponse.json } as Response); await expect( - generateContent(fakeApiSettings, 'model', fakeRequestParams) + generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith(/400.*invalid argument/); expect(mockFetch).to.be.called; }); @@ -307,7 +321,12 @@ describe('generateContent()', () => { json: mockResponse.json } as Response); await expect( - generateContent(fakeApiSettings, 'model', fakeRequestParams) + generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + new ChromeAdapter() + ) ).to.be.rejectedWith( /firebasevertexai\.googleapis[\s\S]*my-project[\s\S]*api-not-enabled/ ); @@ -347,7 +366,8 @@ describe('generateContent()', () => { generateContent( fakeGoogleAIApiSettings, 'model', - requestParamsWithMethod + requestParamsWithMethod, + new ChromeAdapter() ) ).to.be.rejectedWith(AIError, AIErrorCode.UNSUPPORTED); expect(makeRequestStub).to.not.be.called; @@ -362,7 +382,8 @@ describe('generateContent()', () => { await generateContent( fakeGoogleAIApiSettings, 'model', - fakeGoogleAIRequestParams + fakeGoogleAIRequestParams, + new ChromeAdapter() ); expect(makeRequestStub).to.be.calledWith( @@ -375,4 +396,25 @@ describe('generateContent()', () => { ); }); }); + // TODO: define a similar test for generateContentStream + it('on-device', async () => { + const chromeAdapter = new ChromeAdapter(); + const isAvailableStub = stub(chromeAdapter, 'isAvailable').resolves(true); + const mockResponse = getMockResponse( + 'vertexAI', + 'unary-success-basic-reply-short.json' + ); + const generateContentStub = stub(chromeAdapter, 'generateContent').resolves( + mockResponse as Response + ); + const result = await generateContent( + fakeApiSettings, + 'model', + fakeRequestParams, + chromeAdapter + ); + expect(result.response.text()).to.include('Mountain View, California'); + expect(isAvailableStub).to.be.called; + 
expect(generateContentStub).to.be.calledWith(fakeRequestParams); + }); }); diff --git a/packages/vertexai/src/methods/generate-content.ts b/packages/vertexai/src/methods/generate-content.ts index 5f7902f5954..ff99b306855 100644 --- a/packages/vertexai/src/methods/generate-content.ts +++ b/packages/vertexai/src/methods/generate-content.ts @@ -28,17 +28,18 @@ import { processStream } from '../requests/stream-reader'; import { ApiSettings } from '../types/internal'; import * as GoogleAIMapper from '../googleai-mappers'; import { BackendType } from '../public-types'; +import { ChromeAdapter } from './chrome-adapter'; -export async function generateContentStream( +async function generateContentStreamOnCloud( apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions -): Promise { +): Promise { if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { params = GoogleAIMapper.mapGenerateContentRequest(params); } - const response = await makeRequest( + return makeRequest( model, Task.STREAM_GENERATE_CONTENT, apiSettings, @@ -46,19 +47,39 @@ export async function generateContentStream( JSON.stringify(params), requestOptions ); +} + +export async function generateContentStream( + apiSettings: ApiSettings, + model: string, + params: GenerateContentRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + let response; + if (await chromeAdapter.isAvailable(params)) { + response = await chromeAdapter.generateContentStream(params); + } else { + response = await generateContentStreamOnCloud( + apiSettings, + model, + params, + requestOptions + ); + } return processStream(response, apiSettings); // TODO: Map streaming responses } -export async function generateContent( +async function generateContentOnCloud( apiSettings: ApiSettings, model: string, params: GenerateContentRequest, requestOptions?: RequestOptions -): Promise { +): Promise { if (apiSettings.backend.backendType === BackendType.GOOGLE_AI) { params = GoogleAIMapper.mapGenerateContentRequest(params); } - const response = await makeRequest( + return makeRequest( model, Task.GENERATE_CONTENT, apiSettings, @@ -66,6 +87,26 @@ export async function generateContent( JSON.stringify(params), requestOptions ); +} + +export async function generateContent( + apiSettings: ApiSettings, + model: string, + params: GenerateContentRequest, + chromeAdapter: ChromeAdapter, + requestOptions?: RequestOptions +): Promise { + let response; + if (await chromeAdapter.isAvailable(params)) { + response = await chromeAdapter.generateContent(params); + } else { + response = await generateContentOnCloud( + apiSettings, + model, + params, + requestOptions + ); + } const generateContentResponse = await processGenerateContentResponse( response, apiSettings diff --git a/packages/vertexai/src/models/generative-model.test.ts b/packages/vertexai/src/models/generative-model.test.ts index 3ce7173e03e..71d4be823ee 100644 --- a/packages/vertexai/src/models/generative-model.test.ts +++ b/packages/vertexai/src/models/generative-model.test.ts @@ -22,6 +22,7 @@ import { match, restore, stub } from 'sinon'; import { getMockResponse } from '../../test-utils/mock-response'; import sinonChai from 'sinon-chai'; import { VertexAIBackend } from '../backend'; +import { ChromeAdapter } from '../methods/chrome-adapter'; use(sinonChai); @@ -41,21 +42,27 @@ const fakeAI: AI = { describe('GenerativeModel', () => { it('passes params through to generateContent', async () => { - const genModel = new 
GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { - functionDeclarations: [ - { - name: 'myfunc', - description: 'mydesc' - } - ] - } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { + functionDeclarations: [ + { + name: 'myfunc', + description: 'mydesc' + } + ] + } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -86,10 +93,14 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to generateContent', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - systemInstruction: 'be friendly' - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + systemInstruction: 'be friendly' + }, + new ChromeAdapter() + ); expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly'); const mockResponse = getMockResponse( 'vertexAI', @@ -112,21 +123,27 @@ describe('GenerativeModel', () => { restore(); }); it('generateContent overrides model values', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { - functionDeclarations: [ - { - name: 'myfunc', - description: 'mydesc' - } - ] - } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { + functionDeclarations: [ + { + name: 'myfunc', + description: 'mydesc' + } + ] + } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -168,14 +185,20 @@ describe('GenerativeModel', () => { restore(); }); it('passes params through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -206,10 +229,14 @@ describe('GenerativeModel', () => { restore(); }); it('passes text-only systemInstruction through to chat.sendMessage', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - systemInstruction: 'be friendly' - }); + const genModel = new GenerativeModel( + 
fakeAI, + { + model: 'my-model', + systemInstruction: 'be friendly' + }, + new ChromeAdapter() + ); expect(genModel.systemInstruction?.parts[0].text).to.equal('be friendly'); const mockResponse = getMockResponse( 'vertexAI', @@ -232,14 +259,20 @@ describe('GenerativeModel', () => { restore(); }); it('startChat overrides model values', async () => { - const genModel = new GenerativeModel(fakeAI, { - model: 'my-model', - tools: [ - { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } - ], - toolConfig: { functionCallingConfig: { mode: FunctionCallingMode.NONE } }, - systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } - }); + const genModel = new GenerativeModel( + fakeAI, + { + model: 'my-model', + tools: [ + { functionDeclarations: [{ name: 'myfunc', description: 'mydesc' }] } + ], + toolConfig: { + functionCallingConfig: { mode: FunctionCallingMode.NONE } + }, + systemInstruction: { role: 'system', parts: [{ text: 'be friendly' }] } + }, + new ChromeAdapter() + ); expect(genModel.tools?.length).to.equal(1); expect(genModel.toolConfig?.functionCallingConfig?.mode).to.equal( FunctionCallingMode.NONE @@ -284,7 +317,11 @@ describe('GenerativeModel', () => { restore(); }); it('calls countTokens', async () => { - const genModel = new GenerativeModel(fakeAI, { model: 'my-model' }); + const genModel = new GenerativeModel( + fakeAI, + { model: 'my-model' }, + new ChromeAdapter() + ); const mockResponse = getMockResponse( 'vertexAI', 'unary-success-total-tokens.json' diff --git a/packages/vertexai/src/models/generative-model.ts b/packages/vertexai/src/models/generative-model.ts index 2e7ed93eeb8..02965043d4e 100644 --- a/packages/vertexai/src/models/generative-model.ts +++ b/packages/vertexai/src/models/generative-model.ts @@ -43,12 +43,17 @@ import { } from '../requests/request-helpers'; import { AI } from '../public-types'; import { AIModel } from './genai-model'; +import { ChromeAdapter } from '../methods/chrome-adapter'; /** * Class for generative model APIs. * @public */ export class GenerativeModel extends AIModel { + /** + * Defines the name of the default in-cloud model to use for hybrid inference. 
+ */ + static DEFAULT_HYBRID_IN_CLOUD_MODEL = 'gemini-2.0-flash-lite'; generationConfig: GenerationConfig; safetySettings: SafetySetting[]; requestOptions?: RequestOptions; @@ -59,6 +64,7 @@ export class GenerativeModel extends AIModel { constructor( ai: AI, modelParams: ModelParams, + private chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions ) { super(ai, modelParams.model); @@ -91,6 +97,7 @@ export class GenerativeModel extends AIModel { systemInstruction: this.systemInstruction, ...formattedParams }, + this.chromeAdapter, this.requestOptions ); } @@ -116,6 +123,7 @@ export class GenerativeModel extends AIModel { systemInstruction: this.systemInstruction, ...formattedParams }, + this.chromeAdapter, this.requestOptions ); } @@ -128,6 +136,7 @@ export class GenerativeModel extends AIModel { return new ChatSession( this._apiSettings, this.model, + this.chromeAdapter, { tools: this.tools, toolConfig: this.toolConfig, @@ -145,6 +154,11 @@ export class GenerativeModel extends AIModel { request: CountTokensRequest | string | Array ): Promise { const formattedParams = formatGenerateContentInput(request); - return countTokens(this._apiSettings, this.model, formattedParams); + return countTokens( + this._apiSettings, + this.model, + formattedParams, + this.chromeAdapter + ); } } diff --git a/packages/vertexai/src/types/language-model.ts b/packages/vertexai/src/types/language-model.ts new file mode 100644 index 00000000000..cd84f22dbdb --- /dev/null +++ b/packages/vertexai/src/types/language-model.ts @@ -0,0 +1,82 @@ +/** + * @license + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export interface LanguageModel extends EventTarget { + create(options?: LanguageModelCreateOptions): Promise; + availability(options?: LanguageModelCreateCoreOptions): Promise; + prompt( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): Promise; + promptStreaming( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): ReadableStream; + measureInputUsage( + input: LanguageModelPrompt, + options?: LanguageModelPromptOptions + ): Promise; + destroy(): undefined; +} +export enum Availability { + 'unavailable' = 'unavailable', + 'downloadable' = 'downloadable', + 'downloading' = 'downloading', + 'available' = 'available' +} +export interface LanguageModelCreateCoreOptions { + topK?: number; + temperature?: number; + expectedInputs?: LanguageModelExpectedInput[]; +} +export interface LanguageModelCreateOptions + extends LanguageModelCreateCoreOptions { + signal?: AbortSignal; + systemPrompt?: string; + initialPrompts?: LanguageModelInitialPrompts; +} +interface LanguageModelPromptOptions { + signal?: AbortSignal; +} +interface LanguageModelExpectedInput { + type: LanguageModelMessageType; + languages?: string[]; +} +// TODO: revert to type from Prompt API explainer once it's supported. 
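The `LanguageModel` interface above mirrors Chrome's Prompt API. A minimal sketch of how an on-device caller might consume it — assuming a browser-exposed `LanguageModel` object implementing this interface and that its `Promise` return types resolve to `Availability`, a session object, and `string`; `promptOnDevice` is a hypothetical helper and is not part of this patch:

```typescript
// Hypothetical helper, not part of this patch. Assumes the browser exposes a
// `LanguageModel` object implementing the interface declared above.
declare const LanguageModel: LanguageModel;

async function promptOnDevice(text: string): Promise<string | undefined> {
  // Ask whether the on-device model can serve requests with these options.
  const availability = await LanguageModel.availability({ temperature: 1, topK: 3 });
  if (availability === Availability.unavailable) {
    return undefined; // caller should fall back to the in-cloud path
  }
  // Create a session, send a single text prompt, then release the session.
  const session = await LanguageModel.create({ systemPrompt: 'Answer briefly.' });
  const answer = await session.prompt([{ type: 'text', content: text }]);
  session.destroy();
  return answer;
}
```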
+export type LanguageModelPrompt = LanguageModelMessageContent[]; +type LanguageModelInitialPrompts = + | LanguageModelMessage[] + | LanguageModelMessageShorthand[]; +interface LanguageModelMessage { + role: LanguageModelMessageRole; + content: LanguageModelMessageContent[]; +} +interface LanguageModelMessageShorthand { + role: LanguageModelMessageRole; + content: string; +} +export interface LanguageModelMessageContent { + type: LanguageModelMessageType; + content: LanguageModelMessageContentValue; +} +type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; +type LanguageModelMessageType = 'text' | 'image' | 'audio'; +type LanguageModelMessageContentValue = + | ImageBitmapSource + | AudioBuffer + | BufferSource + | string; diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts index 33ed804bb9f..36700b5a936 100644 --- a/packages/vertexai/src/types/requests.ts +++ b/packages/vertexai/src/types/requests.ts @@ -17,6 +17,7 @@ import { TypedSchema } from '../requests/schema-builder'; import { Content, Part } from './content'; +import { LanguageModelCreateOptions } from './language-model'; import { FunctionCallingMode, HarmBlockMethod, @@ -218,3 +219,29 @@ export interface FunctionCallingConfig { mode?: FunctionCallingMode; allowedFunctionNames?: string[]; } + +/** + * Toggles hybrid inference. + */ +export interface HybridParams { + /** + * Specifies on-device or in-cloud inference. Defaults to prefer on-device. + */ + mode: InferenceMode; + /** + * Optional. Specifies advanced params for on-device inference. + */ + onDeviceParams?: LanguageModelCreateOptions; + /** + * Optional. Specifies advanced params for in-cloud inference. + */ + inCloudParams?: ModelParams; +} + +/** + * Determines whether inference happens on-device or in-cloud. 
+ */ +export type InferenceMode = + | 'prefer_on_device' + | 'only_on_device' + | 'only_in_cloud'; From dd0c4b10d21bd783b856b24e8594b5b030fc4a56 Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Wed, 23 Apr 2025 17:39:03 -0700 Subject: [PATCH 10/16] VinF Hybrid Inference: update docs (#8970) --- common/api-review/firestore.api.md | 42 ++--- common/api-review/vertexai.api.md | 19 ++- docs-devsite/_toc.yaml | 26 +-- docs-devsite/firestore_.md | 159 ++++++++++++++++-- .../firestore_.memorycachesettings.md | 35 ---- .../firestore_.memoryeagergarbagecollector.md | 37 ---- docs-devsite/firestore_.memorylocalcache.md | 35 ---- .../firestore_.memorylrugarbagecollector.md | 37 ---- .../firestore_.persistentcachesettings.md | 50 ------ .../firestore_.persistentlocalcache.md | 35 ---- ...firestore_.persistentmultipletabmanager.md | 33 ---- .../firestore_.persistentsingletabmanager.md | 33 ---- ...ore_.persistentsingletabmanagersettings.md | 35 ---- docs-devsite/vertexai.ai.md | 4 +- docs-devsite/vertexai.aioptions.md | 4 +- docs-devsite/vertexai.backend.md | 57 +++++++ docs-devsite/vertexai.chatsession.md | 5 +- docs-devsite/vertexai.citation.md | 8 +- docs-devsite/vertexai.counttokensresponse.md | 4 +- docs-devsite/vertexai.generativemodel.md | 16 +- docs-devsite/vertexai.googleaibackend.md | 36 ++++ docs-devsite/vertexai.hybridparams.md | 57 +++++++ docs-devsite/vertexai.md | 114 +++---------- docs-devsite/vertexai.modelparams.md | 2 +- docs-devsite/vertexai.promptfeedback.md | 4 +- docs-devsite/vertexai.requestoptions.md | 2 +- docs-devsite/vertexai.safetyrating.md | 12 +- docs-devsite/vertexai.vertexaibackend.md | 58 +++++++ .../src/backwards-compatbility.test.ts | 7 +- 29 files changed, 464 insertions(+), 502 deletions(-) delete mode 100644 docs-devsite/firestore_.memorycachesettings.md delete mode 100644 docs-devsite/firestore_.memoryeagergarbagecollector.md delete mode 100644 docs-devsite/firestore_.memorylocalcache.md delete mode 100644 docs-devsite/firestore_.memorylrugarbagecollector.md delete mode 100644 docs-devsite/firestore_.persistentcachesettings.md delete mode 100644 docs-devsite/firestore_.persistentlocalcache.md delete mode 100644 docs-devsite/firestore_.persistentmultipletabmanager.md delete mode 100644 docs-devsite/firestore_.persistentsingletabmanager.md delete mode 100644 docs-devsite/firestore_.persistentsingletabmanagersettings.md create mode 100644 docs-devsite/vertexai.backend.md create mode 100644 docs-devsite/vertexai.googleaibackend.md create mode 100644 docs-devsite/vertexai.hybridparams.md create mode 100644 docs-devsite/vertexai.vertexaibackend.md diff --git a/common/api-review/firestore.api.md b/common/api-review/firestore.api.md index 34b56b97f21..26c379a6e34 100644 --- a/common/api-review/firestore.api.md +++ b/common/api-review/firestore.api.md @@ -375,15 +375,14 @@ export interface LoadBundleTaskProgress { export { LogLevel } // @public -export interface MemoryCacheSettings { +export type MemoryCacheSettings = { garbageCollector?: MemoryGarbageCollector; -} +}; // @public -export interface MemoryEagerGarbageCollector { - // (undocumented) +export type MemoryEagerGarbageCollector = { kind: 'memoryEager'; -} +}; // @public export function memoryEagerGarbageCollector(): MemoryEagerGarbageCollector; @@ -392,19 +391,17 @@ export function memoryEagerGarbageCollector(): MemoryEagerGarbageCollector; export type MemoryGarbageCollector = MemoryEagerGarbageCollector | MemoryLruGarbageCollector; // @public -export interface MemoryLocalCache { - // (undocumented) +export 
type MemoryLocalCache = { kind: 'memory'; -} +}; // @public export function memoryLocalCache(settings?: MemoryCacheSettings): MemoryLocalCache; // @public -export interface MemoryLruGarbageCollector { - // (undocumented) +export type MemoryLruGarbageCollector = { kind: 'memoryLru'; -} +}; // @public export function memoryLruGarbageCollector(settings?: { @@ -494,42 +491,39 @@ export class PersistentCacheIndexManager { } // @public -export interface PersistentCacheSettings { +export type PersistentCacheSettings = { cacheSizeBytes?: number; tabManager?: PersistentTabManager; -} +}; // @public -export interface PersistentLocalCache { - // (undocumented) +export type PersistentLocalCache = { kind: 'persistent'; -} +}; // @public export function persistentLocalCache(settings?: PersistentCacheSettings): PersistentLocalCache; // @public -export interface PersistentMultipleTabManager { - // (undocumented) +export type PersistentMultipleTabManager = { kind: 'PersistentMultipleTab'; -} +}; // @public export function persistentMultipleTabManager(): PersistentMultipleTabManager; // @public -export interface PersistentSingleTabManager { - // (undocumented) +export type PersistentSingleTabManager = { kind: 'persistentSingleTab'; -} +}; // @public export function persistentSingleTabManager(settings: PersistentSingleTabManagerSettings | undefined): PersistentSingleTabManager; // @public -export interface PersistentSingleTabManagerSettings { +export type PersistentSingleTabManagerSettings = { forceOwnership?: boolean; -} +}; // @public export type PersistentTabManager = PersistentSingleTabManager | PersistentMultipleTabManager; diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index 1650bf3381e..8758d25bdce 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -112,7 +112,8 @@ export class BooleanSchema extends Schema { // @public export class ChatSession { - constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); + // Warning: (ae-forgotten-export) The symbol "ChromeAdapter" needs to be exported by the entry point index.d.ts + constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); getHistory(): Promise; // (undocumented) model: string; @@ -392,8 +393,9 @@ export interface GenerativeContentBlob { // @public export class GenerativeModel extends AIModel { - constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); + constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions); countTokens(request: CountTokensRequest | string | Array): Promise; + static DEFAULT_HYBRID_IN_CLOUD_MODEL: string; generateContent(request: GenerateContentRequest | string | Array): Promise; generateContentStream(request: GenerateContentRequest | string | Array): Promise; // (undocumented) @@ -415,7 +417,7 @@ export class GenerativeModel extends AIModel { export function getAI(app?: FirebaseApp, options?: AIOptions): AI; // @public -export function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; // @beta export function getImagenModel(ai: AI, modelParams: ImagenModelParams, requestOptions?: RequestOptions): ImagenModel; 
@@ -547,6 +549,14 @@ export enum HarmSeverity { HARM_SEVERITY_UNSUPPORTED = "HARM_SEVERITY_UNSUPPORTED" } +// @public +export interface HybridParams { + inCloudParams?: ModelParams; + mode: InferenceMode; + // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts + onDeviceParams?: LanguageModelCreateOptions; +} + // @beta export enum ImagenAspectRatio { LANDSCAPE_16x9 = "16:9", @@ -631,6 +641,9 @@ export interface ImagenSafetySettings { safetyFilterLevel?: ImagenSafetyFilterLevel; } +// @public +export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud'; + // @public export interface InlineDataPart { // (undocumented) diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml index 03d6b5f6ec7..c1a10429ad7 100644 --- a/docs-devsite/_toc.yaml +++ b/docs-devsite/_toc.yaml @@ -245,28 +245,10 @@ toc: path: /docs/reference/js/firestore_.loadbundletask.md - title: LoadBundleTaskProgress path: /docs/reference/js/firestore_.loadbundletaskprogress.md - - title: MemoryCacheSettings - path: /docs/reference/js/firestore_.memorycachesettings.md - - title: MemoryEagerGarbageCollector - path: /docs/reference/js/firestore_.memoryeagergarbagecollector.md - - title: MemoryLocalCache - path: /docs/reference/js/firestore_.memorylocalcache.md - - title: MemoryLruGarbageCollector - path: /docs/reference/js/firestore_.memorylrugarbagecollector.md - title: PersistenceSettings path: /docs/reference/js/firestore_.persistencesettings.md - title: PersistentCacheIndexManager path: /docs/reference/js/firestore_.persistentcacheindexmanager.md - - title: PersistentCacheSettings - path: /docs/reference/js/firestore_.persistentcachesettings.md - - title: PersistentLocalCache - path: /docs/reference/js/firestore_.persistentlocalcache.md - - title: PersistentMultipleTabManager - path: /docs/reference/js/firestore_.persistentmultipletabmanager.md - - title: PersistentSingleTabManager - path: /docs/reference/js/firestore_.persistentsingletabmanager.md - - title: PersistentSingleTabManagerSettings - path: /docs/reference/js/firestore_.persistentsingletabmanagersettings.md - title: Query path: /docs/reference/js/firestore_.query.md - title: QueryCompositeFilterConstraint @@ -482,6 +464,8 @@ toc: path: /docs/reference/js/vertexai.aioptions.md - title: ArraySchema path: /docs/reference/js/vertexai.arrayschema.md + - title: Backend + path: /docs/reference/js/vertexai.backend.md - title: BaseParams path: /docs/reference/js/vertexai.baseparams.md - title: BooleanSchema @@ -540,10 +524,14 @@ toc: path: /docs/reference/js/vertexai.generativecontentblob.md - title: GenerativeModel path: /docs/reference/js/vertexai.generativemodel.md + - title: GoogleAIBackend + path: /docs/reference/js/vertexai.googleaibackend.md - title: GroundingAttribution path: /docs/reference/js/vertexai.groundingattribution.md - title: GroundingMetadata path: /docs/reference/js/vertexai.groundingmetadata.md + - title: HybridParams + path: /docs/reference/js/vertexai.hybridparams.md - title: ImagenGCSImage path: /docs/reference/js/vertexai.imagengcsimage.md - title: ImagenGenerationConfig @@ -606,6 +594,8 @@ toc: path: /docs/reference/js/vertexai.toolconfig.md - title: UsageMetadata path: /docs/reference/js/vertexai.usagemetadata.md + - title: VertexAIBackend + path: /docs/reference/js/vertexai.vertexaibackend.md - title: VertexAIOptions path: /docs/reference/js/vertexai.vertexaioptions.md - title: VideoMetadata diff --git a/docs-devsite/firestore_.md 
b/docs-devsite/firestore_.md index 91d21e32708..7dfde135ab0 100644 --- a/docs-devsite/firestore_.md +++ b/docs-devsite/firestore_.md @@ -174,16 +174,7 @@ https://github.com/firebase/firebase-js-sdk | [IndexConfiguration](./firestore_.indexconfiguration.md#indexconfiguration_interface) | (Public Preview) A list of Firestore indexes to speed up local query execution.See [JSON Format](https://firebase.google.com/docs/reference/firestore/indexes/#json_format) for a description of the format of the index definition. | | [IndexField](./firestore_.indexfield.md#indexfield_interface) | (Public Preview) A single field element in an index configuration. | | [LoadBundleTaskProgress](./firestore_.loadbundletaskprogress.md#loadbundletaskprogress_interface) | Represents a progress update or a final state from loading bundles. | -| [MemoryCacheSettings](./firestore_.memorycachesettings.md#memorycachesettings_interface) | An settings object to configure an MemoryLocalCache instance. | -| [MemoryEagerGarbageCollector](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollector_interface) | A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them.This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache.Use factory function to create an instance of this collector. | -| [MemoryLocalCache](./firestore_.memorylocalcache.md#memorylocalcache_interface) | Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | -| [MemoryLruGarbageCollector](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollector_interface) | A garbage collector deletes Least-Recently-Used documents in multiple batches.This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint.Use factory function to create a instance of this collector. | | [PersistenceSettings](./firestore_.persistencesettings.md#persistencesettings_interface) | Settings that can be passed to enableIndexedDbPersistence() to configure Firestore persistence.Persistence cannot be used in a Node.js environment. | -| [PersistentCacheSettings](./firestore_.persistentcachesettings.md#persistentcachesettings_interface) | An settings object to configure an PersistentLocalCache instance.Persistent cache cannot be used in a Node.js environment. | -| [PersistentLocalCache](./firestore_.persistentlocalcache.md#persistentlocalcache_interface) | Provides a persistent cache backed by IndexedDb to the SDK.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | -| [PersistentMultipleTabManager](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanager_interface) | A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. 
| -| [PersistentSingleTabManager](./firestore_.persistentsingletabmanager.md#persistentsingletabmanager_interface) | A tab manager supporting only one tab, no synchronization will be performed across tabs. | -| [PersistentSingleTabManagerSettings](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettings_interface) | Type to configure an PersistentSingleTabManager instance. | | [SnapshotListenOptions](./firestore_.snapshotlistenoptions.md#snapshotlistenoptions_interface) | An options object that can be passed to [onSnapshot()](./firestore_.md#onsnapshot_0312fd7) and [QuerySnapshot.docChanges()](./firestore_.querysnapshot.md#querysnapshotdocchanges) to control which types of changes to include in the result set. | | [SnapshotOptions](./firestore_.snapshotoptions.md#snapshotoptions_interface) | Options that configure how data is retrieved from a DocumentSnapshot (for example the desired behavior for server timestamps that have not yet been set to their final value). | | [TransactionOptions](./firestore_.transactionoptions.md#transactionoptions_interface) | Options to customize transaction behavior. | @@ -208,10 +199,19 @@ https://github.com/firebase/firebase-js-sdk | [FirestoreErrorCode](./firestore_.md#firestoreerrorcode) | The set of Firestore status codes. The codes are the same at the ones exposed by gRPC here: https://github.com/grpc/grpc/blob/master/doc/statuscodes.mdPossible values: - 'cancelled': The operation was cancelled (typically by the caller). - 'unknown': Unknown error or an error from a different error domain. - 'invalid-argument': Client specified an invalid argument. Note that this differs from 'failed-precondition'. 'invalid-argument' indicates arguments that are problematic regardless of the state of the system (e.g. an invalid field name). - 'deadline-exceeded': Deadline expired before operation could complete. For operations that change the state of the system, this error may be returned even if the operation has completed successfully. For example, a successful response from a server could have been delayed long enough for the deadline to expire. - 'not-found': Some requested document was not found. - 'already-exists': Some document that we attempted to create already exists. - 'permission-denied': The caller does not have permission to execute the specified operation. - 'resource-exhausted': Some resource has been exhausted, perhaps a per-user quota, or perhaps the entire file system is out of space. - 'failed-precondition': Operation was rejected because the system is not in a state required for the operation's execution. - 'aborted': The operation was aborted, typically due to a concurrency issue like transaction aborts, etc. - 'out-of-range': Operation was attempted past the valid range. - 'unimplemented': Operation is not implemented or not supported/enabled. - 'internal': Internal errors. Means some invariants expected by underlying system has been broken. If you see one of these errors, something is very broken. - 'unavailable': The service is currently unavailable. This is most likely a transient condition and may be corrected by retrying with a backoff. - 'data-loss': Unrecoverable data loss or corruption. - 'unauthenticated': The request does not have valid authentication credentials for the operation. | | [FirestoreLocalCache](./firestore_.md#firestorelocalcache) | Union type from all supported SDK cache layer. 
| | [ListenSource](./firestore_.md#listensource) | Describe the source a query listens to.Set to default to listen to both cache and server changes. Set to cache to listen to changes in cache only. | +| [MemoryCacheSettings](./firestore_.md#memorycachesettings) | An settings object to configure an MemoryLocalCache instance. | +| [MemoryEagerGarbageCollector](./firestore_.md#memoryeagergarbagecollector) | A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them.This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache.Use factory function to create an instance of this collector. | | [MemoryGarbageCollector](./firestore_.md#memorygarbagecollector) | Union type from all support garbage collectors for memory local cache. | +| [MemoryLocalCache](./firestore_.md#memorylocalcache) | Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | +| [MemoryLruGarbageCollector](./firestore_.md#memorylrugarbagecollector) | A garbage collector deletes Least-Recently-Used documents in multiple batches.This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint.Use factory function to create a instance of this collector. | | [NestedUpdateFields](./firestore_.md#nestedupdatefields) | For each field (e.g. 'bar'), find all nested keys (e.g. {'bar.baz': T1, 'bar.qux': T2}). Intersect them together to make a single map containing all possible keys that are all marked as optional | | [OrderByDirection](./firestore_.md#orderbydirection) | The direction of a [orderBy()](./firestore_.md#orderby_006d61f) clause is specified as 'desc' or 'asc' (descending or ascending). | | [PartialWithFieldValue](./firestore_.md#partialwithfieldvalue) | Similar to TypeScript's Partial<T>, but allows nested fields to be omitted and FieldValues to be passed in as property values. | +| [PersistentCacheSettings](./firestore_.md#persistentcachesettings) | An settings object to configure an PersistentLocalCache instance.Persistent cache cannot be used in a Node.js environment. | +| [PersistentLocalCache](./firestore_.md#persistentlocalcache) | Provides a persistent cache backed by IndexedDb to the SDK.To use, create an instance using the factory function , then set the instance to FirestoreSettings.cache and call initializeFirestore using the settings object. | +| [PersistentMultipleTabManager](./firestore_.md#persistentmultipletabmanager) | A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. | +| [PersistentSingleTabManager](./firestore_.md#persistentsingletabmanager) | A tab manager supporting only one tab, no synchronization will be performed across tabs. | +| [PersistentSingleTabManagerSettings](./firestore_.md#persistentsingletabmanagersettings) | Type to configure an PersistentSingleTabManager instance. | | [PersistentTabManager](./firestore_.md#persistenttabmanager) | A union of all available tab managers. | | [Primitive](./firestore_.md#primitive) | Primitive types. 
| | [QueryConstraintType](./firestore_.md#queryconstrainttype) | Describes the different query constraints available in this SDK. | @@ -924,7 +924,7 @@ export declare function memoryEagerGarbageCollector(): MemoryEagerGarbageCollect ``` Returns: -[MemoryEagerGarbageCollector](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollector_interface) +[MemoryEagerGarbageCollector](./firestore_.md#memoryeagergarbagecollector) ### persistentMultipleTabManager() {:#persistentmultipletabmanager} @@ -937,7 +937,7 @@ export declare function persistentMultipleTabManager(): PersistentMultipleTabMan ``` Returns: -[PersistentMultipleTabManager](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanager_interface) +[PersistentMultipleTabManager](./firestore_.md#persistentmultipletabmanager) ### serverTimestamp() {:#servertimestamp} @@ -2293,11 +2293,11 @@ export declare function memoryLocalCache(settings?: MemoryCacheSettings): Memory | Parameter | Type | Description | | --- | --- | --- | -| settings | [MemoryCacheSettings](./firestore_.memorycachesettings.md#memorycachesettings_interface) | | +| settings | [MemoryCacheSettings](./firestore_.md#memorycachesettings) | | Returns: -[MemoryLocalCache](./firestore_.memorylocalcache.md#memorylocalcache_interface) +[MemoryLocalCache](./firestore_.md#memorylocalcache) ### memoryLruGarbageCollector(settings) {:#memorylrugarbagecollector_5ee014c} @@ -2321,7 +2321,7 @@ export declare function memoryLruGarbageCollector(settings?: { Returns: -[MemoryLruGarbageCollector](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollector_interface) +[MemoryLruGarbageCollector](./firestore_.md#memorylrugarbagecollector) ### persistentLocalCache(settings) {:#persistentlocalcache_d312f71} @@ -2339,11 +2339,11 @@ export declare function persistentLocalCache(settings?: PersistentCacheSettings) | Parameter | Type | Description | | --- | --- | --- | -| settings | [PersistentCacheSettings](./firestore_.persistentcachesettings.md#persistentcachesettings_interface) | | +| settings | [PersistentCacheSettings](./firestore_.md#persistentcachesettings) | | Returns: -[PersistentLocalCache](./firestore_.persistentlocalcache.md#persistentlocalcache_interface) +[PersistentLocalCache](./firestore_.md#persistentlocalcache) ### persistentSingleTabManager(settings) {:#persistentsingletabmanager_c99c68d} @@ -2359,11 +2359,11 @@ export declare function persistentSingleTabManager(settings: PersistentSingleTab | Parameter | Type | Description | | --- | --- | --- | -| settings | [PersistentSingleTabManagerSettings](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettings_interface) \| undefined | Configures the created tab manager. | +| settings | [PersistentSingleTabManagerSettings](./firestore_.md#persistentsingletabmanagersettings) \| undefined | Configures the created tab manager. | Returns: -[PersistentSingleTabManager](./firestore_.persistentsingletabmanager.md#persistentsingletabmanager_interface) +[PersistentSingleTabManager](./firestore_.md#persistentsingletabmanager) ## function(snapshot, ...) @@ -2591,6 +2591,34 @@ Set to `default` to listen to both cache and server changes. Set to `cache` to l export declare type ListenSource = 'default' | 'cache'; ``` +## MemoryCacheSettings + +An settings object to configure an `MemoryLocalCache` instance. 
+ +Signature: + +```typescript +export declare type MemoryCacheSettings = { + garbageCollector?: MemoryGarbageCollector; +}; +``` + +## MemoryEagerGarbageCollector + +A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them. + +This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache. + +Use factory function to create an instance of this collector. + +Signature: + +```typescript +export declare type MemoryEagerGarbageCollector = { + kind: 'memoryEager'; +}; +``` + ## MemoryGarbageCollector Union type from all support garbage collectors for memory local cache. @@ -2601,6 +2629,36 @@ Union type from all support garbage collectors for memory local cache. export declare type MemoryGarbageCollector = MemoryEagerGarbageCollector | MemoryLruGarbageCollector; ``` +## MemoryLocalCache + +Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise. + +To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. + +Signature: + +```typescript +export declare type MemoryLocalCache = { + kind: 'memory'; +}; +``` + +## MemoryLruGarbageCollector + +A garbage collector deletes Least-Recently-Used documents in multiple batches. + +This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint. + +Use factory function to create a instance of this collector. + +Signature: + +```typescript +export declare type MemoryLruGarbageCollector = { + kind: 'memoryLru'; +}; +``` + ## NestedUpdateFields For each field (e.g. 'bar'), find all nested keys (e.g. {'bar.baz': T1, 'bar.qux': T2}). Intersect them together to make a single map containing all possible keys that are all marked as optional @@ -2635,6 +2693,71 @@ export declare type PartialWithFieldValue = Partial | (T extends Primitive } : never); ``` +## PersistentCacheSettings + +An settings object to configure an `PersistentLocalCache` instance. + +Persistent cache cannot be used in a Node.js environment. + +Signature: + +```typescript +export declare type PersistentCacheSettings = { + cacheSizeBytes?: number; + tabManager?: PersistentTabManager; +}; +``` + +## PersistentLocalCache + +Provides a persistent cache backed by IndexedDb to the SDK. + +To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. + +Signature: + +```typescript +export declare type PersistentLocalCache = { + kind: 'persistent'; +}; +``` + +## PersistentMultipleTabManager + +A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. + +Signature: + +```typescript +export declare type PersistentMultipleTabManager = { + kind: 'PersistentMultipleTab'; +}; +``` + +## PersistentSingleTabManager + +A tab manager supporting only one tab, no synchronization will be performed across tabs. + +Signature: + +```typescript +export declare type PersistentSingleTabManager = { + kind: 'persistentSingleTab'; +}; +``` + +## PersistentSingleTabManagerSettings + +Type to configure an `PersistentSingleTabManager` instance. 
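As a usage illustration of the cache factory functions documented in these entries — a minimal sketch, assuming the modular `firebase/app` and `firebase/firestore` entry points, a placeholder Firebase config, and the `localCache` settings field:

```typescript
import { initializeApp } from 'firebase/app';
import {
  initializeFirestore,
  persistentLocalCache,
  persistentMultipleTabManager
} from 'firebase/firestore';

const app = initializeApp({ /* your Firebase config */ });

// Build the cache with the factory functions, then pass it to
// initializeFirestore via the settings object.
const db = initializeFirestore(app, {
  localCache: persistentLocalCache({
    tabManager: persistentMultipleTabManager()
  })
});
```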
+ +Signature: + +```typescript +export declare type PersistentSingleTabManagerSettings = { + forceOwnership?: boolean; +}; +``` + ## PersistentTabManager A union of all available tab managers. diff --git a/docs-devsite/firestore_.memorycachesettings.md b/docs-devsite/firestore_.memorycachesettings.md deleted file mode 100644 index 69f46acdf7c..00000000000 --- a/docs-devsite/firestore_.memorycachesettings.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryCacheSettings interface -An settings object to configure an `MemoryLocalCache` instance. - -Signature: - -```typescript -export declare interface MemoryCacheSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [garbageCollector](./firestore_.memorycachesettings.md#memorycachesettingsgarbagecollector) | [MemoryGarbageCollector](./firestore_.md#memorygarbagecollector) | The garbage collector to use, for the memory cache layer. A MemoryEagerGarbageCollector is used when this is undefined. | - -## MemoryCacheSettings.garbageCollector - -The garbage collector to use, for the memory cache layer. A `MemoryEagerGarbageCollector` is used when this is undefined. - -Signature: - -```typescript -garbageCollector?: MemoryGarbageCollector; -``` diff --git a/docs-devsite/firestore_.memoryeagergarbagecollector.md b/docs-devsite/firestore_.memoryeagergarbagecollector.md deleted file mode 100644 index 01e7341611a..00000000000 --- a/docs-devsite/firestore_.memoryeagergarbagecollector.md +++ /dev/null @@ -1,37 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryEagerGarbageCollector interface -A garbage collector deletes documents whenever they are not part of any active queries, and have no local mutations attached to them. - -This collector tries to ensure lowest memory footprints from the SDK, at the risk of documents not being cached for offline queries or for direct queries to the cache. - -Use factory function to create an instance of this collector. - -Signature: - -```typescript -export declare interface MemoryEagerGarbageCollector -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memoryeagergarbagecollector.md#memoryeagergarbagecollectorkind) | 'memoryEager' | | - -## MemoryEagerGarbageCollector.kind - -Signature: - -```typescript -kind: 'memoryEager'; -``` diff --git a/docs-devsite/firestore_.memorylocalcache.md b/docs-devsite/firestore_.memorylocalcache.md deleted file mode 100644 index 92b7d3a2c72..00000000000 --- a/docs-devsite/firestore_.memorylocalcache.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. 
Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryLocalCache interface -Provides an in-memory cache to the SDK. This is the default cache unless explicitly configured otherwise. - -To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. - -Signature: - -```typescript -export declare interface MemoryLocalCache -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memorylocalcache.md#memorylocalcachekind) | 'memory' | | - -## MemoryLocalCache.kind - -Signature: - -```typescript -kind: 'memory'; -``` diff --git a/docs-devsite/firestore_.memorylrugarbagecollector.md b/docs-devsite/firestore_.memorylrugarbagecollector.md deleted file mode 100644 index 6e15513934a..00000000000 --- a/docs-devsite/firestore_.memorylrugarbagecollector.md +++ /dev/null @@ -1,37 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# MemoryLruGarbageCollector interface -A garbage collector deletes Least-Recently-Used documents in multiple batches. - -This collector is configured with a target size, and will only perform collection when the cached documents exceed the target size. It avoids querying backend repeated for the same query or document, at the risk of having a larger memory footprint. - -Use factory function to create a instance of this collector. - -Signature: - -```typescript -export declare interface MemoryLruGarbageCollector -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.memorylrugarbagecollector.md#memorylrugarbagecollectorkind) | 'memoryLru' | | - -## MemoryLruGarbageCollector.kind - -Signature: - -```typescript -kind: 'memoryLru'; -``` diff --git a/docs-devsite/firestore_.persistentcachesettings.md b/docs-devsite/firestore_.persistentcachesettings.md deleted file mode 100644 index a32d05e4e8e..00000000000 --- a/docs-devsite/firestore_.persistentcachesettings.md +++ /dev/null @@ -1,50 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentCacheSettings interface -An settings object to configure an `PersistentLocalCache` instance. - -Persistent cache cannot be used in a Node.js environment. - -Signature: - -```typescript -export declare interface PersistentCacheSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [cacheSizeBytes](./firestore_.persistentcachesettings.md#persistentcachesettingscachesizebytes) | number | An approximate cache size threshold for the on-disk data. If the cache grows beyond this size, Firestore will start removing data that hasn't been recently used. The SDK does not guarantee that the cache will stay below that size, only that if the cache exceeds the given size, cleanup will be attempted.The default value is 40 MB. 
The threshold must be set to at least 1 MB, and can be set to CACHE_SIZE_UNLIMITED to disable garbage collection. | -| [tabManager](./firestore_.persistentcachesettings.md#persistentcachesettingstabmanager) | [PersistentTabManager](./firestore_.md#persistenttabmanager) | Specifies how multiple tabs/windows will be managed by the SDK. | - -## PersistentCacheSettings.cacheSizeBytes - -An approximate cache size threshold for the on-disk data. If the cache grows beyond this size, Firestore will start removing data that hasn't been recently used. The SDK does not guarantee that the cache will stay below that size, only that if the cache exceeds the given size, cleanup will be attempted. - -The default value is 40 MB. The threshold must be set to at least 1 MB, and can be set to `CACHE_SIZE_UNLIMITED` to disable garbage collection. - -Signature: - -```typescript -cacheSizeBytes?: number; -``` - -## PersistentCacheSettings.tabManager - -Specifies how multiple tabs/windows will be managed by the SDK. - -Signature: - -```typescript -tabManager?: PersistentTabManager; -``` diff --git a/docs-devsite/firestore_.persistentlocalcache.md b/docs-devsite/firestore_.persistentlocalcache.md deleted file mode 100644 index 48d876d15bd..00000000000 --- a/docs-devsite/firestore_.persistentlocalcache.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentLocalCache interface -Provides a persistent cache backed by IndexedDb to the SDK. - -To use, create an instance using the factory function , then set the instance to `FirestoreSettings.cache` and call `initializeFirestore` using the settings object. - -Signature: - -```typescript -export declare interface PersistentLocalCache -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentlocalcache.md#persistentlocalcachekind) | 'persistent' | | - -## PersistentLocalCache.kind - -Signature: - -```typescript -kind: 'persistent'; -``` diff --git a/docs-devsite/firestore_.persistentmultipletabmanager.md b/docs-devsite/firestore_.persistentmultipletabmanager.md deleted file mode 100644 index 20d9cc24452..00000000000 --- a/docs-devsite/firestore_.persistentmultipletabmanager.md +++ /dev/null @@ -1,33 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentMultipleTabManager interface -A tab manager supporting multiple tabs. SDK will synchronize queries and mutations done across all tabs using the SDK. 
- -Signature: - -```typescript -export declare interface PersistentMultipleTabManager -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentmultipletabmanager.md#persistentmultipletabmanagerkind) | 'PersistentMultipleTab' | | - -## PersistentMultipleTabManager.kind - -Signature: - -```typescript -kind: 'PersistentMultipleTab'; -``` diff --git a/docs-devsite/firestore_.persistentsingletabmanager.md b/docs-devsite/firestore_.persistentsingletabmanager.md deleted file mode 100644 index 22601cf31fb..00000000000 --- a/docs-devsite/firestore_.persistentsingletabmanager.md +++ /dev/null @@ -1,33 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentSingleTabManager interface -A tab manager supporting only one tab, no synchronization will be performed across tabs. - -Signature: - -```typescript -export declare interface PersistentSingleTabManager -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [kind](./firestore_.persistentsingletabmanager.md#persistentsingletabmanagerkind) | 'persistentSingleTab' | | - -## PersistentSingleTabManager.kind - -Signature: - -```typescript -kind: 'persistentSingleTab'; -``` diff --git a/docs-devsite/firestore_.persistentsingletabmanagersettings.md b/docs-devsite/firestore_.persistentsingletabmanagersettings.md deleted file mode 100644 index afe2842d4c4..00000000000 --- a/docs-devsite/firestore_.persistentsingletabmanagersettings.md +++ /dev/null @@ -1,35 +0,0 @@ -Project: /docs/reference/js/_project.yaml -Book: /docs/reference/_book.yaml -page_type: reference - -{% comment %} -DO NOT EDIT THIS FILE! -This is generated by the JS SDK team, and any local changes will be -overwritten. Changes should be made in the source code at -https://github.com/firebase/firebase-js-sdk -{% endcomment %} - -# PersistentSingleTabManagerSettings interface -Type to configure an `PersistentSingleTabManager` instance. - -Signature: - -```typescript -export declare interface PersistentSingleTabManagerSettings -``` - -## Properties - -| Property | Type | Description | -| --- | --- | --- | -| [forceOwnership](./firestore_.persistentsingletabmanagersettings.md#persistentsingletabmanagersettingsforceownership) | boolean | Whether to force-enable persistent (IndexedDB) cache for the client. This cannot be used with multi-tab synchronization and is primarily intended for use with Web Workers. Setting this to true will enable IndexedDB, but cause other tabs using IndexedDB cache to fail. | - -## PersistentSingleTabManagerSettings.forceOwnership - -Whether to force-enable persistent (IndexedDB) cache for the client. This cannot be used with multi-tab synchronization and is primarily intended for use with Web Workers. Setting this to `true` will enable IndexedDB, but cause other tabs using IndexedDB cache to fail. 
- -Signature: - -```typescript -forceOwnership?: boolean; -``` diff --git a/docs-devsite/vertexai.ai.md b/docs-devsite/vertexai.ai.md index 2901c2ccd01..3be9ea5d488 100644 --- a/docs-devsite/vertexai.ai.md +++ b/docs-devsite/vertexai.ai.md @@ -25,7 +25,7 @@ export interface AI | Property | Type | Description | | --- | --- | --- | | [app](./vertexai.ai.md#aiapp) | [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) | The [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface) this [AI](./vertexai.ai.md#ai_interface) instance is associated with. | -| [backend](./vertexai.ai.md#aibackend) | [Backend](./vertexai.md#backend) | A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. | +| [backend](./vertexai.ai.md#aibackend) | [Backend](./vertexai.backend.md#backend_class) | A [Backend](./vertexai.backend.md#backend_class) instance that specifies the backend configuration. | | [location](./vertexai.ai.md#ailocation) | string | The location configured for this AI service instance, relevant for Vertex AI backends. | ## AI.app @@ -40,7 +40,7 @@ app: FirebaseApp; ## AI.backend -A [Backend](./vertexai.md#backend) instance that specifies the backend configuration. +A [Backend](./vertexai.backend.md#backend_class) instance that specifies the backend configuration. Signature: diff --git a/docs-devsite/vertexai.aioptions.md b/docs-devsite/vertexai.aioptions.md index 4d5e7117740..393a83b3f9c 100644 --- a/docs-devsite/vertexai.aioptions.md +++ b/docs-devsite/vertexai.aioptions.md @@ -22,11 +22,11 @@ export interface AIOptions | Property | Type | Description | | --- | --- | --- | -| [backend](./vertexai.aioptions.md#aioptionsbackend) | [Backend](./vertexai.md#backend) | The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. | +| [backend](./vertexai.aioptions.md#aioptionsbackend) | [Backend](./vertexai.backend.md#backend_class) | The backend configuration to use for the AI service instance. | ## AIOptions.backend -The backend configuration to use for the AI service instance. Use [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) to create this configuration. +The backend configuration to use for the AI service instance. Signature: diff --git a/docs-devsite/vertexai.backend.md b/docs-devsite/vertexai.backend.md new file mode 100644 index 00000000000..b55224f5205 --- /dev/null +++ b/docs-devsite/vertexai.backend.md @@ -0,0 +1,57 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# Backend class +Abstract base class representing the configuration for an AI service backend. This class should not be instantiated directly. Use its subclasses [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class). + +Signature: + +```typescript +export declare abstract class Backend +``` + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)(type)](./vertexai.backend.md#backendconstructor) | | Protected constructor for use by subclasses. 
| + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [backendType](./vertexai.backend.md#backendbackendtype) | | [BackendType](./vertexai.md#backendtype) | Specifies the backend type (either 'GOOGLE\_AI' or 'VERTEX\_AI'). | + +## Backend.(constructor) + +Protected constructor for use by subclasses. + +Signature: + +```typescript +protected constructor(type: BackendType); +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| type | [BackendType](./vertexai.md#backendtype) | The specific backend type constant (e.g., BackendType.GOOGLE\_AI). | + +## Backend.backendType + +Specifies the backend type (either 'GOOGLE\_AI' or 'VERTEX\_AI'). + +Signature: + +```typescript +readonly backendType: BackendType; +``` diff --git a/docs-devsite/vertexai.chatsession.md b/docs-devsite/vertexai.chatsession.md index ed359f7e08c..c4a06206bfd 100644 --- a/docs-devsite/vertexai.chatsession.md +++ b/docs-devsite/vertexai.chatsession.md @@ -22,7 +22,7 @@ export declare class ChatSession | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiSettings, model, params, requestOptions)](./vertexai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class | +| [(constructor)(apiSettings, model, chromeAdapter, params, requestOptions)](./vertexai.chatsession.md#chatsessionconstructor) | | Constructs a new instance of the ChatSession class | ## Properties @@ -47,7 +47,7 @@ Constructs a new instance of the `ChatSession` class Signature: ```typescript -constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); +constructor(apiSettings: ApiSettings, model: string, chromeAdapter: ChromeAdapter, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); ``` #### Parameters @@ -56,6 +56,7 @@ constructor(apiSettings: ApiSettings, model: string, params?: StartChatParams | | --- | --- | --- | | apiSettings | ApiSettings | | | model | string | | +| chromeAdapter | ChromeAdapter | | | params | [StartChatParams](./vertexai.startchatparams.md#startchatparams_interface) \| undefined | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) \| undefined | | diff --git a/docs-devsite/vertexai.citation.md b/docs-devsite/vertexai.citation.md index b5f5a19f231..f2e4e2581cb 100644 --- a/docs-devsite/vertexai.citation.md +++ b/docs-devsite/vertexai.citation.md @@ -24,9 +24,9 @@ export interface Citation | --- | --- | --- | | [endIndex](./vertexai.citation.md#citationendindex) | number | | | [license](./vertexai.citation.md#citationlicense) | string | | -| [publicationDate](./vertexai.citation.md#citationpublicationdate) | Date | | +| [publicationDate](./vertexai.citation.md#citationpublicationdate) | Date | This field is not supported in Google AI. | | [startIndex](./vertexai.citation.md#citationstartindex) | number | | -| [title](./vertexai.citation.md#citationtitle) | string | | +| [title](./vertexai.citation.md#citationtitle) | string | This field is not supported in Google AI. | | [uri](./vertexai.citation.md#citationuri) | string | | ## Citation.endIndex @@ -47,6 +47,8 @@ license?: string; ## Citation.publicationDate +This field is not supported in Google AI. + Signature: ```typescript @@ -63,6 +65,8 @@ startIndex?: number; ## Citation.title +This field is not supported in Google AI. 
+ Signature: ```typescript diff --git a/docs-devsite/vertexai.counttokensresponse.md b/docs-devsite/vertexai.counttokensresponse.md index d67cc99fab2..dab373586b4 100644 --- a/docs-devsite/vertexai.counttokensresponse.md +++ b/docs-devsite/vertexai.counttokensresponse.md @@ -23,7 +23,7 @@ export interface CountTokensResponse | Property | Type | Description | | --- | --- | --- | | [promptTokensDetails](./vertexai.counttokensresponse.md#counttokensresponseprompttokensdetails) | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface)\[\] | The breakdown, by modality, of how many tokens are consumed by the prompt. | -| [totalBillableCharacters](./vertexai.counttokensresponse.md#counttokensresponsetotalbillablecharacters) | number | The total number of billable characters counted across all instances from the request. | +| [totalBillableCharacters](./vertexai.counttokensresponse.md#counttokensresponsetotalbillablecharacters) | number | The total number of billable characters counted across all instances from the request.This field is not supported in Google AI, so it will default to 0 when using Google AI. | | [totalTokens](./vertexai.counttokensresponse.md#counttokensresponsetotaltokens) | number | The total number of tokens counted across all instances from the request. | ## CountTokensResponse.promptTokensDetails @@ -40,6 +40,8 @@ promptTokensDetails?: ModalityTokenCount[]; The total number of billable characters counted across all instances from the request. +This field is not supported in Google AI, so it will default to 0 when using Google AI. + Signature: ```typescript diff --git a/docs-devsite/vertexai.generativemodel.md b/docs-devsite/vertexai.generativemodel.md index ba82b65aceb..4012cf53665 100644 --- a/docs-devsite/vertexai.generativemodel.md +++ b/docs-devsite/vertexai.generativemodel.md @@ -23,12 +23,13 @@ export declare class GenerativeModel extends AIModel | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(ai, modelParams, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(ai, modelParams, chromeAdapter, requestOptions)](./vertexai.generativemodel.md#generativemodelconstructor) | | Constructs a new instance of the GenerativeModel class | ## Properties | Property | Modifiers | Type | Description | | --- | --- | --- | --- | +| [DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL](./vertexai.generativemodel.md#generativemodeldefault_hybrid_in_cloud_model) | static | string | Defines the name of the default in-cloud model to use for hybrid inference. 
| | [generationConfig](./vertexai.generativemodel.md#generativemodelgenerationconfig) | | [GenerationConfig](./vertexai.generationconfig.md#generationconfig_interface) | | | [requestOptions](./vertexai.generativemodel.md#generativemodelrequestoptions) | | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | | [safetySettings](./vertexai.generativemodel.md#generativemodelsafetysettings) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface)\[\] | | @@ -52,7 +53,7 @@ Constructs a new instance of the `GenerativeModel` class Signature: ```typescript -constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(ai: AI, modelParams: ModelParams, chromeAdapter: ChromeAdapter, requestOptions?: RequestOptions); ``` #### Parameters @@ -61,8 +62,19 @@ constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions); | --- | --- | --- | | ai | [AI](./vertexai.ai.md#ai_interface) | | | modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | +| chromeAdapter | ChromeAdapter | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | +## GenerativeModel.DEFAULT\_HYBRID\_IN\_CLOUD\_MODEL + +Defines the name of the default in-cloud model to use for hybrid inference. + +Signature: + +```typescript +static DEFAULT_HYBRID_IN_CLOUD_MODEL: string; +``` + ## GenerativeModel.generationConfig Signature: diff --git a/docs-devsite/vertexai.googleaibackend.md b/docs-devsite/vertexai.googleaibackend.md new file mode 100644 index 00000000000..99e9bd18a14 --- /dev/null +++ b/docs-devsite/vertexai.googleaibackend.md @@ -0,0 +1,36 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# GoogleAIBackend class +Represents the configuration class for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export declare class GoogleAIBackend extends Backend +``` +Extends: [Backend](./vertexai.backend.md#backend_class) + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)()](./vertexai.googleaibackend.md#googleaibackendconstructor) | | Creates a configuration object for the Google AI backend. | + +## GoogleAIBackend.(constructor) + +Creates a configuration object for the Google AI backend. + +Signature: + +```typescript +constructor(); +``` diff --git a/docs-devsite/vertexai.hybridparams.md b/docs-devsite/vertexai.hybridparams.md new file mode 100644 index 00000000000..cf847b40fa7 --- /dev/null +++ b/docs-devsite/vertexai.hybridparams.md @@ -0,0 +1,57 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# HybridParams interface +Toggles hybrid inference. 
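For example, a `HybridParams` object may be passed to `getGenerativeModel()` in place of `ModelParams` to opt into hybrid inference. The sketch below assumes an initialized `app`; the model name, prompts, and system prompt are illustrative placeholders, not values required by the SDK.

```javascript
import { getAI, getGenerativeModel } from 'firebase/vertexai';

const ai = getAI(app);

// Prefer on-device inference, falling back to the in-cloud model when needed.
const model = getGenerativeModel(ai, {
  mode: 'prefer_on_device',
  // Optional: advanced params applied only when a request is served in-cloud.
  inCloudParams: { model: 'gemini-2.0-flash', generationConfig: { temperature: 0 } },
  // Optional: advanced params applied only to on-device inference.
  onDeviceParams: { systemPrompt: 'Answer in two words.' }
});

const result = await model.generateContent('Describe the Mojave desert.');
console.log(result.response.text());
```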
+ +Signature: + +```typescript +export interface HybridParams +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [inCloudParams](./vertexai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. | +| [mode](./vertexai.hybridparams.md#hybridparamsmode) | [InferenceMode](./vertexai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. | +| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | LanguageModelCreateOptions | Optional. Specifies advanced params for on-device inference. | + +## HybridParams.inCloudParams + +Optional. Specifies advanced params for in-cloud inference. + +Signature: + +```typescript +inCloudParams?: ModelParams; +``` + +## HybridParams.mode + +Specifies on-device or in-cloud inference. Defaults to prefer on-device. + +Signature: + +```typescript +mode: InferenceMode; +``` + +## HybridParams.onDeviceParams + +Optional. Specifies advanced params for on-device inference. + +Signature: + +```typescript +onDeviceParams?: LanguageModelCreateOptions; +``` diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md index 544deb2987d..46eafd41e80 100644 --- a/docs-devsite/vertexai.md +++ b/docs-devsite/vertexai.md @@ -18,14 +18,10 @@ The Firebase AI Web SDK. | --- | --- | | function(app, ...) | | [getAI(app, options)](./vertexai.md#getai_a94a413) | Returns the default [AI](./vertexai.ai.md#ai_interface) instance that is associated with the provided [FirebaseApp](./app.firebaseapp.md#firebaseapp_interface). If no instance exists, initializes a new instance with the default settings. | -| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | -| function() | -| [googleAIBackend()](./vertexai.md#googleaibackend) | Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. | +| [getVertexAI(app, options)](./vertexai.md#getvertexai_04094cf) | It is recommended to use the new [getAI()](./vertexai.md#getai_a94a413).Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. | | function(ai, ...) | -| [getGenerativeModel(ai, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_80bd839) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | +| [getGenerativeModel(ai, modelParams, requestOptions)](./vertexai.md#getgenerativemodel_c63f46a) | Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. | | [getImagenModel(ai, modelParams, requestOptions)](./vertexai.md#getimagenmodel_e1f6645) | (Public Preview) Returns an [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) class with methods for using Imagen.Only Imagen 3 models (named imagen-3.0-*) are supported. | -| function(location, ...) | -| [vertexAIBackend(location)](./vertexai.md#vertexaibackend_d0a4534) | Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. | ## Classes @@ -34,9 +30,11 @@ The Firebase AI Web SDK. | [AIError](./vertexai.aierror.md#aierror_class) | Error class for the Firebase AI SDK. | | [AIModel](./vertexai.aimodel.md#aimodel_class) | Base class for Firebase AI model APIs. | | [ArraySchema](./vertexai.arrayschema.md#arrayschema_class) | Schema class for "array" types. 
The items param should refer to the type of item that can be a member of the array. | +| [Backend](./vertexai.backend.md#backend_class) | Abstract base class representing the configuration for an AI service backend. This class should not be instantiated directly. Use its subclasses [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class). | | [BooleanSchema](./vertexai.booleanschema.md#booleanschema_class) | Schema class for "boolean" types. | | [ChatSession](./vertexai.chatsession.md#chatsession_class) | ChatSession class that enables sending chat messages and stores history of sent and received messages so far. | | [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) | Class for generative model APIs. | +| [GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) | Represents the configuration class for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). | | [ImagenImageFormat](./vertexai.imagenimageformat.md#imagenimageformat_class) | (Public Preview) Defines the image format for images generated by Imagen.Use this class to specify the desired format (JPEG or PNG) and compression quality for images generated by Imagen. This is typically included as part of [ImagenModelParams](./vertexai.imagenmodelparams.md#imagenmodelparams_interface). | | [ImagenModel](./vertexai.imagenmodel.md#imagenmodel_class) | (Public Preview) Class for Imagen model APIs.This class provides methods for generating images using the Imagen model. | | [IntegerSchema](./vertexai.integerschema.md#integerschema_class) | Schema class for "integer" types. | @@ -44,6 +42,7 @@ The Firebase AI Web SDK. | [ObjectSchema](./vertexai.objectschema.md#objectschema_class) | Schema class for "object" types. The properties param must be a map of Schema objects. | | [Schema](./vertexai.schema.md#schema_class) | Parent class encompassing all Schema types, with static methods that allow building specific Schema types. This class can be converted with JSON.stringify() into a JSON string accepted by Vertex AI REST endpoints. (This string conversion is automatically done when calling SDK methods.) | | [StringSchema](./vertexai.stringschema.md#stringschema_class) | Schema class for "string" types. Can be used with or without enum values. | +| [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class) | Represents the configuration class for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). | ## Enumerations @@ -98,6 +97,7 @@ The Firebase AI Web SDK. | [GenerativeContentBlob](./vertexai.generativecontentblob.md#generativecontentblob_interface) | Interface for sending an image. | | [GroundingAttribution](./vertexai.groundingattribution.md#groundingattribution_interface) | | | [GroundingMetadata](./vertexai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned to client when grounding is enabled. | +| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | Toggles hybrid inference. | | [ImagenGCSImage](./vertexai.imagengcsimage.md#imagengcsimage_interface) | An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. 
| | [ImagenGenerationConfig](./vertexai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. | | [ImagenGenerationResponse](./vertexai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. | @@ -106,10 +106,10 @@ The Firebase AI Web SDK. | [ImagenSafetySettings](./vertexai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. | | [InlineDataPart](./vertexai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. | | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | -| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | +| [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). | | [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. | | [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | -| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). | +| [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). | | [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | | | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface) | A safety rating associated with a [GenerateContentCandidate](./vertexai.generatecontentcandidate.md#generatecontentcandidate_interface) | | [SafetySetting](./vertexai.safetysetting.md#safetysetting_interface) | Safety setting that can be sent as part of request parameters. | @@ -130,7 +130,7 @@ The Firebase AI Web SDK. | Variable | Description | | --- | --- | -| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase AI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. | +| [BackendType](./vertexai.md#backendtype) | An enum-like object containing constants that represent the supported backends for the Firebase AI SDK.These values are assigned to the backendType property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class)) to identify which service to target. | | [POSSIBLE\_ROLES](./vertexai.md#possible_roles) | Possible roles. 
| | [VertexAIError](./vertexai.md#vertexaierror) | Error class for the Firebase AI SDK.For more information, refer to the documentation for the new [AIError](./vertexai.aierror.md#aierror_class). | | [VertexAIModel](./vertexai.md#vertexaimodel) | Base class for Firebase AI model APIs.For more information, refer to the documentation for the new [AIModel](./vertexai.aimodel.md#aimodel_class). | @@ -139,15 +139,13 @@ The Firebase AI Web SDK. | Type Alias | Description | | --- | --- | -| [Backend](./vertexai.md#backend) | Union type representing the backend configuration for the AI service. This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object.Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). | | [BackendType](./vertexai.md#backendtype) | Type alias representing valid backend types. It can be either 'VERTEX_AI' or 'GOOGLE_AI'. | -| [GoogleAIBackend](./vertexai.md#googleaibackend) | Represents the configuration object for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). | +| [InferenceMode](./vertexai.md#inferencemode) | Determines whether inference happens on-device or in-cloud. | | [Part](./vertexai.md#part) | Content part - includes text, image/video, or function call/response part types. | | [Role](./vertexai.md#role) | Role is the producer of the content. | | [Tool](./vertexai.md#tool) | Defines a tool that model can call to access external knowledge. | | [TypedSchema](./vertexai.md#typedschema) | A type that includes all specific Schema types. | -| [VertexAI](./vertexai.md#vertexai) | An instance of the Firebase AI SDK.For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface). | -| [VertexAIBackend](./vertexai.md#vertexaibackend) | Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. | +| [VertexAI](./vertexai.md#vertexai) | An instance of the Firebase AI SDK.For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface) interface. | ## function(app, ...) @@ -187,7 +185,7 @@ const ai = getAI(app); ```javascript // Get an AI instance configured to use Google AI. -const ai = getAI(app, { backend: googleAIBackend() }); +const ai = getAI(app, { backend: new GoogleAIBackend() }); ``` @@ -196,12 +194,14 @@ const ai = getAI(app, { backend: googleAIBackend() }); ```javascript // Get an AI instance configured to use Vertex AI. -const ai = getAI(app, { backend: vertexAIBackend() }); +const ai = getAI(app, { backend: new VertexAIBackend() }); ``` ### getVertexAI(app, options) {:#getvertexai_04094cf} +It is recommended to use the new [getAI()](./vertexai.md#getai_a94a413). + Returns a [VertexAI](./vertexai.md#vertexai) instance for the given app. 
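A minimal migration sketch, assuming an initialized `app`: the existing call and its recommended replacement.

```javascript
// Existing code
const vertexAI = getVertexAI(app);

// Recommended equivalent using the new API
const ai = getAI(app, { backend: new VertexAIBackend() });
```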
Signature: @@ -221,33 +221,16 @@ export declare function getVertexAI(app?: FirebaseApp, options?: VertexAIOptions [VertexAI](./vertexai.md#vertexai) -## function() - -### googleAIBackend() {:#googleaibackend} - -Creates a [Backend](./vertexai.md#backend) instance configured to use Google AI. - -Signature: - -```typescript -export declare function googleAIBackend(): GoogleAIBackend; -``` -Returns: - -[GoogleAIBackend](./vertexai.md#googleaibackend) - -A [GoogleAIBackend](./vertexai.md#googleaibackend) object. - ## function(ai, ...) -### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_80bd839} +### getGenerativeModel(ai, modelParams, requestOptions) {:#getgenerativemodel_c63f46a} Returns a [GenerativeModel](./vertexai.generativemodel.md#generativemodel_class) class with methods for inference and other functionality. Signature: ```typescript -export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions): GenerativeModel; +export declare function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; ``` #### Parameters @@ -255,7 +238,7 @@ export declare function getGenerativeModel(ai: AI, modelParams: ModelParams, req | Parameter | Type | Description | | --- | --- | --- | | ai | [AI](./vertexai.ai.md#ai_interface) | | -| modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | | +| modelParams | [ModelParams](./vertexai.modelparams.md#modelparams_interface) \| [HybridParams](./vertexai.hybridparams.md#hybridparams_interface) | | | requestOptions | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | | Returns: @@ -293,35 +276,11 @@ export declare function getImagenModel(ai: AI, modelParams: ImagenModelParams, r If the `apiKey` or `projectId` fields are missing in your Firebase config. -## function(location, ...) - -### vertexAIBackend(location) {:#vertexaibackend_d0a4534} - -Creates a [Backend](./vertexai.md#backend) instance configured to use Vertex AI. - -Signature: - -```typescript -export declare function vertexAIBackend(location?: string): VertexAIBackend; -``` - -#### Parameters - -| Parameter | Type | Description | -| --- | --- | --- | -| location | string | The region identifier, defaulting to us-central1; see [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | - -Returns: - -[VertexAIBackend](./vertexai.md#vertexaibackend) - -A [VertexAIBackend](./vertexai.md#vertexaibackend) object. - ## BackendType An enum-like object containing constants that represent the supported backends for the Firebase AI SDK. -These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.md#googleaibackend) or [VertexAIBackend](./vertexai.md#vertexaibackend)) to identify which service to target. +These values are assigned to the `backendType` property within the specific backend configuration objects ([GoogleAIBackend](./vertexai.googleaibackend.md#googleaibackend_class) or [VertexAIBackend](./vertexai.vertexaibackend.md#vertexaibackend_class)) to identify which service to target. Signature: @@ -366,18 +325,6 @@ For more information, refer to the documentation for the new [AIModel](./vertexa VertexAIModel: typeof AIModel ``` -## Backend - -Union type representing the backend configuration for the AI service. 
This can be either a [GoogleAIBackend](./vertexai.md#googleaibackend) or a [VertexAIBackend](./vertexai.md#vertexaibackend) configuration object. - -Create instances using [googleAIBackend()](./vertexai.md#googleaibackend) or [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534). - -Signature: - -```typescript -export type Backend = GoogleAIBackend | VertexAIBackend; -``` - ## BackendType Type alias representing valid backend types. It can be either `'VERTEX_AI'` or `'GOOGLE_AI'`. @@ -388,16 +335,14 @@ Type alias representing valid backend types. It can be either `'VERTEX_AI'` or ` export type BackendType = (typeof BackendType)[keyof typeof BackendType]; ``` -## GoogleAIBackend +## InferenceMode -Represents the configuration object for the Google AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the service with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [googleAIBackend()](./vertexai.md#googleaibackend). +Determines whether inference happens on-device or in-cloud. Signature: ```typescript -export type GoogleAIBackend = { - backendType: typeof BackendType.GOOGLE_AI; -}; +export type InferenceMode = 'prefer_on_device' | 'only_on_device' | 'only_in_cloud'; ``` ## Part @@ -444,7 +389,7 @@ export type TypedSchema = IntegerSchema | NumberSchema | StringSchema | BooleanS An instance of the Firebase AI SDK. -For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface). +For more information, refer to the documentation for the new [AI](./vertexai.ai.md#ai_interface) interface. Signature: @@ -452,19 +397,6 @@ For more information, refer to the documentation for the new [AI](./vertexai.ai. export type VertexAI = AI; ``` -## VertexAIBackend - -Represents the configuration object for the Vertex AI backend. Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). Create an instance using [vertexAIBackend()](./vertexai.md#vertexaibackend_d0a4534) function. - -Signature: - -```typescript -export type VertexAIBackend = { - backendType: typeof BackendType.VERTEX_AI; - location: string; -}; -``` - ## AIErrorCode Standardized error codes that [AIError](./vertexai.aierror.md#aierror_class) can have. diff --git a/docs-devsite/vertexai.modelparams.md b/docs-devsite/vertexai.modelparams.md index bb8a87d5fb2..b4930cf9895 100644 --- a/docs-devsite/vertexai.modelparams.md +++ b/docs-devsite/vertexai.modelparams.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # ModelParams interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). Signature: diff --git a/docs-devsite/vertexai.promptfeedback.md b/docs-devsite/vertexai.promptfeedback.md index 369ef02051d..64332244e23 100644 --- a/docs-devsite/vertexai.promptfeedback.md +++ b/docs-devsite/vertexai.promptfeedback.md @@ -23,7 +23,7 @@ export interface PromptFeedback | Property | Type | Description | | --- | --- | --- | | [blockReason](./vertexai.promptfeedback.md#promptfeedbackblockreason) | [BlockReason](./vertexai.md#blockreason) | | -| [blockReasonMessage](./vertexai.promptfeedback.md#promptfeedbackblockreasonmessage) | string | | +| [blockReasonMessage](./vertexai.promptfeedback.md#promptfeedbackblockreasonmessage) | string | This field is unsupported in Google AI. 
| | [safetyRatings](./vertexai.promptfeedback.md#promptfeedbacksafetyratings) | [SafetyRating](./vertexai.safetyrating.md#safetyrating_interface)\[\] | | ## PromptFeedback.blockReason @@ -36,6 +36,8 @@ blockReason?: BlockReason; ## PromptFeedback.blockReasonMessage +This field is unsupported in Google AI. + Signature: ```typescript diff --git a/docs-devsite/vertexai.requestoptions.md b/docs-devsite/vertexai.requestoptions.md index 3c233d72b90..aec60365a0f 100644 --- a/docs-devsite/vertexai.requestoptions.md +++ b/docs-devsite/vertexai.requestoptions.md @@ -10,7 +10,7 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # RequestOptions interface -Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_80bd839). +Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). Signature: diff --git a/docs-devsite/vertexai.safetyrating.md b/docs-devsite/vertexai.safetyrating.md index 28493bafef0..34739830a60 100644 --- a/docs-devsite/vertexai.safetyrating.md +++ b/docs-devsite/vertexai.safetyrating.md @@ -25,9 +25,9 @@ export interface SafetyRating | [blocked](./vertexai.safetyrating.md#safetyratingblocked) | boolean | | | [category](./vertexai.safetyrating.md#safetyratingcategory) | [HarmCategory](./vertexai.md#harmcategory) | | | [probability](./vertexai.safetyrating.md#safetyratingprobability) | [HarmProbability](./vertexai.md#harmprobability) | | -| [probabilityScore](./vertexai.safetyrating.md#safetyratingprobabilityscore) | number | | -| [severity](./vertexai.safetyrating.md#safetyratingseverity) | [HarmSeverity](./vertexai.md#harmseverity) | | -| [severityScore](./vertexai.safetyrating.md#safetyratingseverityscore) | number | | +| [probabilityScore](./vertexai.safetyrating.md#safetyratingprobabilityscore) | number | This field is not supported in Google AI, so it will default to 0 when using Google AI. | +| [severity](./vertexai.safetyrating.md#safetyratingseverity) | [HarmSeverity](./vertexai.md#harmseverity) | This field is not supported in Google AI, so it will default to HarmSeverity.UNSUPPORTED when using Google AI. | +| [severityScore](./vertexai.safetyrating.md#safetyratingseverityscore) | number | This field is not supported in Google AI, so it will default to 0 when using Google AI. | ## SafetyRating.blocked @@ -55,6 +55,8 @@ probability: HarmProbability; ## SafetyRating.probabilityScore +This field is not supported in Google AI, so it will default to 0 when using Google AI. + Signature: ```typescript @@ -63,6 +65,8 @@ probabilityScore: number; ## SafetyRating.severity +This field is not supported in Google AI, so it will default to `HarmSeverity.UNSUPPORTED` when using Google AI. + Signature: ```typescript @@ -71,6 +75,8 @@ severity: HarmSeverity; ## SafetyRating.severityScore +This field is not supported in Google AI, so it will default to 0 when using Google AI. + Signature: ```typescript diff --git a/docs-devsite/vertexai.vertexaibackend.md b/docs-devsite/vertexai.vertexaibackend.md new file mode 100644 index 00000000000..cd255e30034 --- /dev/null +++ b/docs-devsite/vertexai.vertexaibackend.md @@ -0,0 +1,58 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# VertexAIBackend class +Represents the configuration class for the Vertex AI backend. 
Use this with [AIOptions](./vertexai.aioptions.md#aioptions_interface) when initializing the server with [getAI()](./vertexai.md#getai_a94a413). + +Signature: + +```typescript +export declare class VertexAIBackend extends Backend +``` +Extends: [Backend](./vertexai.backend.md#backend_class) + +## Constructors + +| Constructor | Modifiers | Description | +| --- | --- | --- | +| [(constructor)(location)](./vertexai.vertexaibackend.md#vertexaibackendconstructor) | | Creates a configuration object for the Vertex AI backend. | + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [location](./vertexai.vertexaibackend.md#vertexaibackendlocation) | | string | The region identifier. See [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | + +## VertexAIBackend.(constructor) + +Creates a configuration object for the Vertex AI backend. + +Signature: + +```typescript +constructor(location?: string); +``` + +#### Parameters + +| Parameter | Type | Description | +| --- | --- | --- | +| location | string | The region identifier, defaulting to us-central1; see [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. | + +## VertexAIBackend.location + +The region identifier. See [Vertex AI locations](https://firebase.google.com/docs/vertex-ai/locations?platform=ios#available-locations) for a list of supported locations. + +Signature: + +```typescript +readonly location: string; +``` diff --git a/packages/vertexai/src/backwards-compatbility.test.ts b/packages/vertexai/src/backwards-compatbility.test.ts index 62463009b24..da0b613bf21 100644 --- a/packages/vertexai/src/backwards-compatbility.test.ts +++ b/packages/vertexai/src/backwards-compatbility.test.ts @@ -28,6 +28,7 @@ import { } from './api'; import { AI, VertexAI, AIErrorCode } from './public-types'; import { VertexAIBackend } from './backend'; +import { ChromeAdapter } from './methods/chrome-adapter'; function assertAssignable(): void {} @@ -65,7 +66,11 @@ describe('backwards-compatible types', () => { it('AIModel is backwards compatible with VertexAIModel', () => { assertAssignable(); - const model = new GenerativeModel(fakeAI, { model: 'model-name' }); + const model = new GenerativeModel( + fakeAI, + { model: 'model-name' }, + new ChromeAdapter() + ); expect(model).to.be.instanceOf(AIModel); expect(model).to.be.instanceOf(VertexAIModel); }); From fafa6a81f5228be0440b61b70773605351c9be1c Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Thu, 24 Apr 2025 11:15:37 -0700 Subject: [PATCH 11/16] VinF Hybrid Inference: consolidate onDeviceParams initialization (#8969) --- .../vertexai/src/methods/chrome-adapter.ts | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index 63e1db83e89..32dec23035d 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -41,8 +41,10 @@ export class ChromeAdapter { constructor( private languageModelProvider?: LanguageModel, private mode?: InferenceMode, - private onDeviceParams?: LanguageModelCreateOptions - ) {} + private onDeviceParams: LanguageModelCreateOptions = {} + ) { + this.addImageTypeAsExpectedInput(); + } /** * Checks if a given request can be made on-device. 
@@ -64,12 +66,8 @@ export class ChromeAdapter { return false; } - const availability = await this.languageModelProvider?.availability(); - - // Triggers async model download so it'll be available next time. - if (availability === Availability.downloadable) { - this.download(); - } + // Triggers out-of-band download so model will eventually become available. + const availability = await this.downloadIfAvailable(); if (this.mode === 'only_on_device') { return true; @@ -91,10 +89,7 @@ export class ChromeAdapter { * @returns {@link Response}, so we can reuse common response formatting. */ async generateContent(request: GenerateContentRequest): Promise { - const session = await this.createSession( - // TODO: normalize on-device params during construction. - this.onDeviceParams || {} - ); + const session = await this.createSession(); // TODO: support multiple content objects when Chrome supports // sequence const contents = await Promise.all( @@ -115,10 +110,7 @@ export class ChromeAdapter { async generateContentStream( request: GenerateContentRequest ): Promise { - const session = await this.createSession( - // TODO: normalize on-device params during construction. - this.onDeviceParams || {} - ); + const session = await this.createSession(); // TODO: support multiple content objects when Chrome supports // sequence const contents = await Promise.all( @@ -155,7 +147,22 @@ export class ChromeAdapter { } /** - * Triggers the download of an on-device model. + * Encapsulates logic to get availability and download a model if one is downloadable. + */ + private async downloadIfAvailable(): Promise { + const availability = await this.languageModelProvider?.availability( + this.onDeviceParams + ); + + if (availability === Availability.downloadable) { + this.download(); + } + + return availability; + } + + /** + * Triggers out-of-band download of an on-device model. * *

Chrome only downloads models as needed. Chrome knows a model is needed when code calls * LanguageModel.create.

@@ -168,10 +175,8 @@ export class ChromeAdapter { return; } this.isDownloading = true; - const options = this.onDeviceParams || {}; - ChromeAdapter.addImageTypeAsExpectedInput(options); this.downloadPromise = this.languageModelProvider - ?.create(options) + ?.create(this.onDeviceParams) .then(() => { this.isDownloading = false; }); @@ -214,19 +219,16 @@ export class ChromeAdapter { *

Chrome will remove a model from memory if it's no longer in use, so this method ensures a * new session is created before an old session is destroyed.

*/ - private async createSession( - // TODO: define a default value, since these are optional. - options: LanguageModelCreateOptions - ): Promise { + private async createSession(): Promise { if (!this.languageModelProvider) { throw new AIError( AIErrorCode.REQUEST_ERROR, 'Chrome AI requested for unsupported browser version.' ); } - // TODO: could we use this.onDeviceParams instead of passing in options? - ChromeAdapter.addImageTypeAsExpectedInput(options); - const newSession = await this.languageModelProvider!.create(options); + const newSession = await this.languageModelProvider.create( + this.onDeviceParams + ); if (this.oldSession) { this.oldSession.destroy(); } @@ -235,11 +237,9 @@ export class ChromeAdapter { return newSession; } - private static addImageTypeAsExpectedInput( - options: LanguageModelCreateOptions - ): void { - options.expectedInputs = options.expectedInputs || []; - options.expectedInputs.push({ type: 'image' }); + private addImageTypeAsExpectedInput(): void { + // Defaults to support image inputs for convenience. + this.onDeviceParams.expectedInputs ??= [{ type: 'image' }]; } /** From 6c60de53caa831bbd3ef4362ea5f30b63c2a4eb9 Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Fri, 25 Apr 2025 15:06:54 -0700 Subject: [PATCH 12/16] VinF Hybrid Inference: disable multi-turn support (#8973) --- packages/vertexai/src/methods/chrome-adapter.test.ts | 4 ++-- packages/vertexai/src/methods/chrome-adapter.ts | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts index 859a02a4e85..f9233c637e2 100644 --- a/packages/vertexai/src/methods/chrome-adapter.test.ts +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -82,7 +82,7 @@ describe('ChromeAdapter', () => { }) ).to.be.false; }); - it('returns false if request content has function role', async () => { + it('returns false if request content has non-user role', async () => { const adapter = new ChromeAdapter( { availability: async () => Availability.available @@ -93,7 +93,7 @@ describe('ChromeAdapter', () => { await adapter.isAvailable({ contents: [ { - role: 'function', + role: 'model', parts: [] } ] diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index 32dec23035d..c175d64f603 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -136,9 +136,10 @@ export class ChromeAdapter { return false; } - // Applies the same checks as above, but for each content item. for (const content of request.contents) { - if (content.role === 'function') { + // Returns false if the request contains multiple roles, eg a chat history. + // TODO: remove this guard once LanguageModelMessage is supported. 
+ if (content.role !== 'user') { return false; } } From dc3794a2b66ba0a00356ed5c676960163d7a4ed5 Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Fri, 25 Apr 2025 15:08:03 -0700 Subject: [PATCH 13/16] VinF Hybrid Inference: remove default expected input types (#8974) --- .../src/methods/chrome-adapter.test.ts | 25 +++++++------------ .../vertexai/src/methods/chrome-adapter.ts | 9 +------ 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts index f9233c637e2..abdbd08c401 100644 --- a/packages/vertexai/src/methods/chrome-adapter.test.ts +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -122,13 +122,14 @@ describe('ChromeAdapter', () => { const createStub = stub(languageModelProvider, 'create').resolves( {} as LanguageModel ); - const adapter = new ChromeAdapter( - languageModelProvider, - 'prefer_on_device' - ); const expectedOnDeviceParams = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + expectedOnDeviceParams + ); expect( await adapter.isAvailable({ contents: [{ role: 'user', parts: [{ text: 'hi' }] }] @@ -221,9 +222,6 @@ describe('ChromeAdapter', () => { ); const promptOutput = 'hi'; const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); - const onDeviceParams = { - systemPrompt: 'be yourself' - } as LanguageModelCreateOptions; const expectedOnDeviceParams = { systemPrompt: 'be yourself', expectedInputs: [{ type: 'image' }] @@ -231,7 +229,7 @@ describe('ChromeAdapter', () => { const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - onDeviceParams + expectedOnDeviceParams ); const request = { contents: [{ role: 'user', parts: [{ text: 'anything' }] }] @@ -270,9 +268,6 @@ describe('ChromeAdapter', () => { ); const promptOutput = 'hi'; const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); - const onDeviceParams = { - systemPrompt: 'be yourself' - } as LanguageModelCreateOptions; const expectedOnDeviceParams = { systemPrompt: 'be yourself', expectedInputs: [{ type: 'image' }] @@ -280,7 +275,7 @@ describe('ChromeAdapter', () => { const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - onDeviceParams + expectedOnDeviceParams ); const request = { contents: [ @@ -379,14 +374,13 @@ describe('ChromeAdapter', () => { } }) ); - const onDeviceParams = {} as LanguageModelCreateOptions; const expectedOnDeviceParams = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - onDeviceParams + expectedOnDeviceParams ); const request = { contents: [{ role: 'user', parts: [{ text: 'anything' }] }] @@ -423,14 +417,13 @@ describe('ChromeAdapter', () => { } }) ); - const onDeviceParams = {} as LanguageModelCreateOptions; const expectedOnDeviceParams = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - onDeviceParams + expectedOnDeviceParams ); const request = { contents: [ diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index c175d64f603..521e9ca7101 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -42,9 +42,7 @@ export class ChromeAdapter { private 
languageModelProvider?: LanguageModel, private mode?: InferenceMode, private onDeviceParams: LanguageModelCreateOptions = {} - ) { - this.addImageTypeAsExpectedInput(); - } + ) {} /** * Checks if a given request can be made on-device. @@ -238,11 +236,6 @@ export class ChromeAdapter { return newSession; } - private addImageTypeAsExpectedInput(): void { - // Defaults to support image inputs for convenience. - this.onDeviceParams.expectedInputs ??= [{ type: 'image' }]; - } - /** * Formats string returned by Chrome as a {@link Response} returned by Vertex. */ From 31261ca31fff9b70a5b8f363107334ac649052af Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Tue, 29 Apr 2025 15:42:41 -0700 Subject: [PATCH 14/16] VinF Hybrid Inference: set image (and text) as default input type (#8984) --- .../src/methods/chrome-adapter.test.ts | 90 ++++++++++++++++++- .../vertexai/src/methods/chrome-adapter.ts | 23 ++++- 2 files changed, 111 insertions(+), 2 deletions(-) diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts index abdbd08c401..550b87c9e0b 100644 --- a/packages/vertexai/src/methods/chrome-adapter.test.ts +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -52,6 +52,59 @@ async function toStringArray( } describe('ChromeAdapter', () => { + describe('constructor', () => { + it('sets image as expected input type by default', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.available) + } as LanguageModel; + const availabilityStub = stub( + languageModelProvider, + 'availability' + ).resolves(Availability.available); + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device' + ); + await adapter.isAvailable({ + contents: [ + { + role: 'user', + parts: [{ text: 'hi' }] + } + ] + }); + expect(availabilityStub).to.have.been.calledWith({ + expectedInputs: [{ type: 'image' }] + }); + }); + it('honors explicitly set expected inputs', async () => { + const languageModelProvider = { + availability: () => Promise.resolve(Availability.available) + } as LanguageModel; + const availabilityStub = stub( + languageModelProvider, + 'availability' + ).resolves(Availability.available); + const onDeviceParams = { + // Explicitly sets expected inputs. 
+ expectedInputs: [{ type: 'text' }] + } as LanguageModelCreateOptions; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + onDeviceParams + ); + await adapter.isAvailable({ + contents: [ + { + role: 'user', + parts: [{ text: 'hi' }] + } + ] + }); + expect(availabilityStub).to.have.been.calledWith(onDeviceParams); + }); + }); describe('isAvailable', () => { it('returns false if mode is only cloud', async () => { const adapter = new ChromeAdapter(undefined, 'only_in_cloud'); @@ -100,6 +153,33 @@ describe('ChromeAdapter', () => { }) ).to.be.false; }); + it('returns true if request has image with supported mime type', async () => { + const adapter = new ChromeAdapter( + { + availability: async () => Availability.available + } as LanguageModel, + 'prefer_on_device' + ); + for (const mimeType of ChromeAdapter.SUPPORTED_MIME_TYPES) { + expect( + await adapter.isAvailable({ + contents: [ + { + role: 'user', + parts: [ + { + inlineData: { + mimeType, + data: '' + } + } + ] + } + ] + }) + ).to.be.true; + } + }); it('returns true if model is readily available', async () => { const languageModelProvider = { availability: () => Promise.resolve(Availability.available) @@ -110,7 +190,15 @@ describe('ChromeAdapter', () => { ); expect( await adapter.isAvailable({ - contents: [{ role: 'user', parts: [{ text: 'hi' }] }] + contents: [ + { + role: 'user', + parts: [ + { text: 'describe this image' }, + { inlineData: { mimeType: 'image/jpeg', data: 'asd' } } + ] + } + ] }) ).to.be.true; }); diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index 521e9ca7101..9ac8f350a02 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -35,6 +35,8 @@ import { * and encapsulates logic for detecting when on-device is possible. */ export class ChromeAdapter { + // Visible for testing + static SUPPORTED_MIME_TYPES = ['image/jpeg', 'image/png']; private isDownloading = false; private downloadPromise: Promise | undefined; private oldSession: LanguageModel | undefined; @@ -42,7 +44,9 @@ export class ChromeAdapter { private languageModelProvider?: LanguageModel, private mode?: InferenceMode, private onDeviceParams: LanguageModelCreateOptions = {} - ) {} + ) { + this.addImageTypeAsExpectedInput(); + } /** * Checks if a given request can be made on-device. @@ -140,6 +144,18 @@ export class ChromeAdapter { if (content.role !== 'user') { return false; } + + // Returns false if request contains an image with an unsupported mime type. + for (const part of content.parts) { + if ( + part.inlineData && + ChromeAdapter.SUPPORTED_MIME_TYPES.indexOf( + part.inlineData.mimeType + ) === -1 + ) { + return false; + } + } } return true; @@ -236,6 +252,11 @@ export class ChromeAdapter { return newSession; } + private addImageTypeAsExpectedInput(): void { + // Defaults to support image inputs for convenience. + this.onDeviceParams.expectedInputs ??= [{ type: 'image' }]; + } + /** * Formats string returned by Chrome as a {@link Response} returned by Vertex. 
*/ From e859c030a0e0c2db990d89cddf407d75f3f13c5f Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Fri, 2 May 2025 14:47:07 -0700 Subject: [PATCH 15/16] VinF Hybrid Inference: log debug messages in conditional logic (#8992) --- e2e/sample-apps/modular.js | 17 ++++++++--- .../vertexai/src/methods/chrome-adapter.ts | 29 ++++++++++++++++--- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/e2e/sample-apps/modular.js b/e2e/sample-apps/modular.js index aeebe19a4b1..abcf829856b 100644 --- a/e2e/sample-apps/modular.js +++ b/e2e/sample-apps/modular.js @@ -314,13 +314,22 @@ async function callVertexAI(app) { console.log('[VERTEXAI] start'); const vertexAI = getVertexAI(app); const model = getGenerativeModel(vertexAI, { - mode: 'only_on_device' + mode: 'prefer_on_device' }); const singleResult = await model.generateContent([ - { text: 'describe the following:' }, - { text: 'the mojave desert' } + { text: 'describe this 20 x 20 px image in two words' }, + { + inlineData: { + mimeType: 'image/heic', + data: 'AAAAGGZ0eXBoZWljAAAAAGhlaWNtaWYxAAAB7G1ldGEAAAAAAAAAIWhkbHIAAAAAAAAAAHBpY3QAAAAAAAAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAAAAABAAAADHVybCAAAAABAAAADnBpdG0AAAAAAAEAAAA4aWluZgAAAAAAAgAAABVpbmZlAgAAAAABAABodmMxAAAAABVpbmZlAgAAAQACAABFeGlmAAAAABppcmVmAAAAAAAAAA5jZHNjAAIAAQABAAABD2lwcnAAAADtaXBjbwAAABNjb2xybmNseAACAAIABoAAAAAMY2xsaQDLAEAAAAAUaXNwZQAAAAAAAAAUAAAADgAAAChjbGFwAAAAFAAAAAEAAAANAAAAAQAAAAAAAAAB/8AAAACAAAAAAAAJaXJvdAAAAAAQcGl4aQAAAAADCAgIAAAAcWh2Y0MBA3AAAACwAAAAAAAe8AD8/fj4AAALA6AAAQAXQAEMAf//A3AAAAMAsAAAAwAAAwAecCShAAEAI0IBAQNwAAADALAAAAMAAAMAHqAUIEHAjw1iHuRZVNwICBgCogABAAlEAcBhcshAUyQAAAAaaXBtYQAAAAAAAAABAAEHgQIDhIUGhwAAACxpbG9jAAAAAEQAAAIAAQAAAAEAAAJsAAABDAACAAAAAQAAAhQAAABYAAAAAW1kYXQAAAAAAAABdAAAAAZFeGlmAABNTQAqAAAACAAEARIAAwAAAAEAAQAAARoABQAAAAEAAAA+ARsABQAAAAEAAABGASgAAwAAAAEAAgAAAAAAAAAAAEgAAAABAAAASAAAAAEAAAEIKAGvoR8wDimTiRYUbALiHkU3ZdZ8DXAcSrRB9GARtVQHvnCE0LEyBGAyb5P4eYr6JAK5UxNX10WNlARq3ZpcGeVD+Xom6LodYasuZKKtDHCz/xnswOtC/ksZzVKhtWQqGvkXcsJnLYqWevNkacnccQ95jbHJBg9nXub69jAAN3xhNOXxjGSxaG9QvES5R7sYICEojRjLF5OB5K3v+okQAwfgWpz/u21ayideOgOZQLAyBkKOv7ymLNCagiPWTlHAuy/3qR1Q7m2ERFaxKIAbLSkIVO/P8m8+anKxhzhC//L8NMAUoF+Sf3aEH9O41fwLc+PlcbrDrjgY2EboD3cn9DyN32Rum2Ym' + } + } ]); console.log(`Generated text: ${singleResult.response.text()}`); + const chat = model.startChat(); + let chatResult = await chat.sendMessage('describe red in two words'); + chatResult = await chat.sendMessage('describe blue'); + console.log('Chat history:', await chat.getHistory()); console.log(`[VERTEXAI] end`); } @@ -345,7 +354,7 @@ function callDataConnect(app) { async function main() { console.log('FIREBASE VERSION', SDK_VERSION); const app = initializeApp(config); - setLogLevel('warn'); + setLogLevel('debug'); // callAppCheck(app); // await authLogin(app); diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index 9ac8f350a02..9ba674937a8 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -16,6 +16,7 @@ */ import { AIError } from '../errors'; +import { logger } from '../logger'; import { CountTokensRequest, GenerateContentRequest, @@ -65,6 +66,9 @@ export class ChromeAdapter { */ async isAvailable(request: GenerateContentRequest): Promise { if (this.mode === 'only_in_cloud') { + logger.debug( + `On-device inference unavailable because mode is "only_in_cloud".` + ); return false; } @@ -76,10 +80,20 @@ export class ChromeAdapter { } // Applies prefer_on_device logic. 
- return ( - availability === Availability.available && - ChromeAdapter.isOnDeviceRequest(request) - ); + if (availability !== Availability.available) { + logger.debug( + `On-device inference unavailable because availability is "${availability}".` + ); + return false; + } + if (!ChromeAdapter.isOnDeviceRequest(request)) { + logger.debug( + `On-device inference unavailable because request is incompatible.` + ); + return false; + } + + return true; } /** @@ -135,6 +149,7 @@ export class ChromeAdapter { private static isOnDeviceRequest(request: GenerateContentRequest): boolean { // Returns false if the prompt is empty. if (request.contents.length === 0) { + logger.debug('Empty prompt rejected for on-device inference.'); return false; } @@ -142,6 +157,9 @@ export class ChromeAdapter { // Returns false if the request contains multiple roles, eg a chat history. // TODO: remove this guard once LanguageModelMessage is supported. if (content.role !== 'user') { + logger.debug( + `Non-user role "${content.role}" rejected for on-device inference.` + ); return false; } @@ -153,6 +171,9 @@ export class ChromeAdapter { part.inlineData.mimeType ) === -1 ) { + logger.debug( + `Unsupported mime type "${part.inlineData.mimeType}" rejected for on-device inference.` + ); return false; } } From 07c75ea17d8b97b934a140ef40f85b48f4dc59b5 Mon Sep 17 00:00:00 2001 From: Erik Eldridge Date: Mon, 12 May 2025 12:50:56 -0700 Subject: [PATCH 16/16] VinF Hybrid Inference: support structured output (#9009) --- common/api-review/vertexai.api.md | 15 ++- docs-devsite/_toc.yaml | 2 + docs-devsite/vertexai.hybridparams.md | 4 +- docs-devsite/vertexai.md | 1 + docs-devsite/vertexai.ondeviceparams.md | 42 +++++++ e2e/sample-apps/modular.js | 50 +++++--- e2e/webpack.config.js | 2 +- .../src/methods/chrome-adapter.test.ts | 108 +++++++++++++++--- .../vertexai/src/methods/chrome-adapter.ts | 36 +++--- packages/vertexai/src/types/language-model.ts | 5 +- packages/vertexai/src/types/requests.ts | 15 ++- 11 files changed, 222 insertions(+), 58 deletions(-) create mode 100644 docs-devsite/vertexai.ondeviceparams.md diff --git a/common/api-review/vertexai.api.md b/common/api-review/vertexai.api.md index 8758d25bdce..05977b66bd9 100644 --- a/common/api-review/vertexai.api.md +++ b/common/api-review/vertexai.api.md @@ -553,8 +553,7 @@ export enum HarmSeverity { export interface HybridParams { inCloudParams?: ModelParams; mode: InferenceMode; - // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts - onDeviceParams?: LanguageModelCreateOptions; + onDeviceParams?: OnDeviceParams; } // @beta @@ -718,6 +717,18 @@ export interface ObjectSchemaInterface extends SchemaInterface { type: SchemaType.OBJECT; } +// @public +export interface OnDeviceParams { + // Warning: (ae-forgotten-export) The symbol "LanguageModelCreateOptions" needs to be exported by the entry point index.d.ts + // + // (undocumented) + createOptions?: LanguageModelCreateOptions; + // Warning: (ae-forgotten-export) The symbol "LanguageModelPromptOptions" needs to be exported by the entry point index.d.ts + // + // (undocumented) + promptOptions?: LanguageModelPromptOptions; +} + // @public export type Part = TextPart | InlineDataPart | FunctionCallPart | FunctionResponsePart | FileDataPart; diff --git a/docs-devsite/_toc.yaml b/docs-devsite/_toc.yaml index c1a10429ad7..405d11bfc01 100644 --- a/docs-devsite/_toc.yaml +++ b/docs-devsite/_toc.yaml @@ -562,6 +562,8 @@ toc: path: 
/docs/reference/js/vertexai.objectschema.md - title: ObjectSchemaInterface path: /docs/reference/js/vertexai.objectschemainterface.md + - title: OnDeviceParams + path: /docs/reference/js/vertexai.ondeviceparams.md - title: PromptFeedback path: /docs/reference/js/vertexai.promptfeedback.md - title: RequestOptions diff --git a/docs-devsite/vertexai.hybridparams.md b/docs-devsite/vertexai.hybridparams.md index cf847b40fa7..9e1e4f9be15 100644 --- a/docs-devsite/vertexai.hybridparams.md +++ b/docs-devsite/vertexai.hybridparams.md @@ -24,7 +24,7 @@ export interface HybridParams | --- | --- | --- | | [inCloudParams](./vertexai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. | | [mode](./vertexai.hybridparams.md#hybridparamsmode) | [InferenceMode](./vertexai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. | -| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | LanguageModelCreateOptions | Optional. Specifies advanced params for on-device inference. | +| [onDeviceParams](./vertexai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./vertexai.ondeviceparams.md#ondeviceparams_interface) | Optional. Specifies advanced params for on-device inference. | ## HybridParams.inCloudParams @@ -53,5 +53,5 @@ Optional. Specifies advanced params for on-device inference. Signature: ```typescript -onDeviceParams?: LanguageModelCreateOptions; +onDeviceParams?: OnDeviceParams; ``` diff --git a/docs-devsite/vertexai.md b/docs-devsite/vertexai.md index 46eafd41e80..034af9bae90 100644 --- a/docs-devsite/vertexai.md +++ b/docs-devsite/vertexai.md @@ -108,6 +108,7 @@ The Firebase AI Web SDK. | [ModalityTokenCount](./vertexai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | | [ModelParams](./vertexai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). | | [ObjectSchemaInterface](./vertexai.objectschemainterface.md#objectschemainterface_interface) | Interface for [ObjectSchema](./vertexai.objectschema.md#objectschema_class) class. | +| [OnDeviceParams](./vertexai.ondeviceparams.md#ondeviceparams_interface) | Encapsulates configuration for on-device inference. | | [PromptFeedback](./vertexai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | | [RequestOptions](./vertexai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./vertexai.md#getgenerativemodel_c63f46a). | | [RetrievedContextAttribution](./vertexai.retrievedcontextattribution.md#retrievedcontextattribution_interface) | | diff --git a/docs-devsite/vertexai.ondeviceparams.md b/docs-devsite/vertexai.ondeviceparams.md new file mode 100644 index 00000000000..3dae308f5e1 --- /dev/null +++ b/docs-devsite/vertexai.ondeviceparams.md @@ -0,0 +1,42 @@ +Project: /docs/reference/js/_project.yaml +Book: /docs/reference/_book.yaml +page_type: reference + +{% comment %} +DO NOT EDIT THIS FILE! +This is generated by the JS SDK team, and any local changes will be +overwritten. Changes should be made in the source code at +https://github.com/firebase/firebase-js-sdk +{% endcomment %} + +# OnDeviceParams interface +Encapsulates configuration for on-device inference. 
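As a sketch of how this interface is consumed (mirroring the e2e sample added in this patch, and assuming the `Schema`, `getAI`, and `getGenerativeModel` exports plus an initialized `app`), `OnDeviceParams` is supplied through `HybridParams.onDeviceParams`, for example to request structured output from the on-device model via `promptOptions.responseConstraint`:

```javascript
const jsonSchema = Schema.object({
  properties: { answer: Schema.string() }
});

const model = getGenerativeModel(getAI(app), {
  mode: 'prefer_on_device',
  // Used when the request is served in-cloud.
  inCloudParams: {
    generationConfig: {
      responseMimeType: 'application/json',
      responseSchema: jsonSchema
    }
  },
  // Used when the request is served on-device.
  onDeviceParams: {
    // Options forwarded to LanguageModel.create(); expectedInputs mirrors the unit tests.
    createOptions: { expectedInputs: [{ type: 'image' }] },
    // Options forwarded to prompt(); constrains on-device output to the schema.
    promptOptions: { responseConstraint: jsonSchema }
  }
});
```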
+ +Signature: + +```typescript +export interface OnDeviceParams +``` + +## Properties + +| Property | Type | Description | +| --- | --- | --- | +| [createOptions](./vertexai.ondeviceparams.md#ondeviceparamscreateoptions) | LanguageModelCreateOptions | | +| [promptOptions](./vertexai.ondeviceparams.md#ondeviceparamspromptoptions) | LanguageModelPromptOptions | | + +## OnDeviceParams.createOptions + +Signature: + +```typescript +createOptions?: LanguageModelCreateOptions; +``` + +## OnDeviceParams.promptOptions + +Signature: + +```typescript +promptOptions?: LanguageModelPromptOptions; +``` diff --git a/e2e/sample-apps/modular.js b/e2e/sample-apps/modular.js index abcf829856b..1617b2aab60 100644 --- a/e2e/sample-apps/modular.js +++ b/e2e/sample-apps/modular.js @@ -58,7 +58,7 @@ import { onValue, off } from 'firebase/database'; -import { getGenerativeModel, getVertexAI } from 'firebase/vertexai'; +import { getGenerativeModel, getVertexAI, Schema } from 'firebase/vertexai'; import { getDataConnect, DataConnect } from 'firebase/data-connect'; /** @@ -313,23 +313,43 @@ function callPerformance(app) { async function callVertexAI(app) { console.log('[VERTEXAI] start'); const vertexAI = getVertexAI(app); - const model = getGenerativeModel(vertexAI, { - mode: 'prefer_on_device' + + const jsonSchema = Schema.object({ + properties: { + characters: Schema.array({ + items: Schema.object({ + properties: { + name: Schema.string(), + accessory: Schema.string(), + age: Schema.number(), + species: Schema.string() + }, + optionalProperties: ['accessory'] + }) + }) + } }); - const singleResult = await model.generateContent([ - { text: 'describe this 20 x 20 px image in two words' }, - { - inlineData: { - mimeType: 'image/heic', - data: 'AAAAGGZ0eXBoZWljAAAAAGhlaWNtaWYxAAAB7G1ldGEAAAAAAAAAIWhkbHIAAAAAAAAAAHBpY3QAAAAAAAAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAAAAABAAAADHVybCAAAAABAAAADnBpdG0AAAAAAAEAAAA4aWluZgAAAAAAAgAAABVpbmZlAgAAAAABAABodmMxAAAAABVpbmZlAgAAAQACAABFeGlmAAAAABppcmVmAAAAAAAAAA5jZHNjAAIAAQABAAABD2lwcnAAAADtaXBjbwAAABNjb2xybmNseAACAAIABoAAAAAMY2xsaQDLAEAAAAAUaXNwZQAAAAAAAAAUAAAADgAAAChjbGFwAAAAFAAAAAEAAAANAAAAAQAAAAAAAAAB/8AAAACAAAAAAAAJaXJvdAAAAAAQcGl4aQAAAAADCAgIAAAAcWh2Y0MBA3AAAACwAAAAAAAe8AD8/fj4AAALA6AAAQAXQAEMAf//A3AAAAMAsAAAAwAAAwAecCShAAEAI0IBAQNwAAADALAAAAMAAAMAHqAUIEHAjw1iHuRZVNwICBgCogABAAlEAcBhcshAUyQAAAAaaXBtYQAAAAAAAAABAAEHgQIDhIUGhwAAACxpbG9jAAAAAEQAAAIAAQAAAAEAAAJsAAABDAACAAAAAQAAAhQAAABYAAAAAW1kYXQAAAAAAAABdAAAAAZFeGlmAABNTQAqAAAACAAEARIAAwAAAAEAAQAAARoABQAAAAEAAAA+ARsABQAAAAEAAABGASgAAwAAAAEAAgAAAAAAAAAAAEgAAAABAAAASAAAAAEAAAEIKAGvoR8wDimTiRYUbALiHkU3ZdZ8DXAcSrRB9GARtVQHvnCE0LEyBGAyb5P4eYr6JAK5UxNX10WNlARq3ZpcGeVD+Xom6LodYasuZKKtDHCz/xnswOtC/ksZzVKhtWQqGvkXcsJnLYqWevNkacnccQ95jbHJBg9nXub69jAAN3xhNOXxjGSxaG9QvES5R7sYICEojRjLF5OB5K3v+okQAwfgWpz/u21ayideOgOZQLAyBkKOv7ymLNCagiPWTlHAuy/3qR1Q7m2ERFaxKIAbLSkIVO/P8m8+anKxhzhC//L8NMAUoF+Sf3aEH9O41fwLc+PlcbrDrjgY2EboD3cn9DyN32Rum2Ym' + + const model = getGenerativeModel(vertexAI, { + // mode: 'prefer_on_device', + mode: 'only_in_cloud', + inCloudParams: { + generationConfig: { + responseMimeType: 'application/json', + responseSchema: jsonSchema + } + }, + onDeviceParams: { + promptOptions: { + responseConstraint: jsonSchema } } - ]); - console.log(`Generated text: ${singleResult.response.text()}`); - const chat = model.startChat(); - let chatResult = await chat.sendMessage('describe red in two words'); - chatResult = await chat.sendMessage('describe blue'); - console.log('Chat history:', await chat.getHistory()); + }); + + const singleResult = 
await model.generateContent( + "For use in a children's card game, generate 10 animal-based characters." + ); + console.log(`Generated text:`, JSON.parse(singleResult.response.text())); console.log(`[VERTEXAI] end`); } diff --git a/e2e/webpack.config.js b/e2e/webpack.config.js index b2e4c25f62e..b2c6a64f17e 100644 --- a/e2e/webpack.config.js +++ b/e2e/webpack.config.js @@ -88,7 +88,7 @@ module.exports = [ stats: { colors: true }, - devtool: 'source-map', + devtool: 'eval-source-map', devServer: { static: './build' } diff --git a/packages/vertexai/src/methods/chrome-adapter.test.ts b/packages/vertexai/src/methods/chrome-adapter.test.ts index 550b87c9e0b..fbe7ec1a5c5 100644 --- a/packages/vertexai/src/methods/chrome-adapter.test.ts +++ b/packages/vertexai/src/methods/chrome-adapter.test.ts @@ -28,6 +28,7 @@ import { } from '../types/language-model'; import { match, stub } from 'sinon'; import { GenerateContentRequest, AIErrorCode } from '../types'; +import { Schema } from '../api'; use(sinonChai); use(chaiAsPromised); @@ -85,14 +86,16 @@ describe('ChromeAdapter', () => { languageModelProvider, 'availability' ).resolves(Availability.available); - const onDeviceParams = { + const createOptions = { // Explicitly sets expected inputs. expectedInputs: [{ type: 'text' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - onDeviceParams + { + createOptions + } ); await adapter.isAvailable({ contents: [ @@ -102,7 +105,7 @@ describe('ChromeAdapter', () => { } ] }); - expect(availabilityStub).to.have.been.calledWith(onDeviceParams); + expect(availabilityStub).to.have.been.calledWith(createOptions); }); }); describe('isAvailable', () => { @@ -210,20 +213,20 @@ describe('ChromeAdapter', () => { const createStub = stub(languageModelProvider, 'create').resolves( {} as LanguageModel ); - const expectedOnDeviceParams = { + const createOptions = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - expectedOnDeviceParams + { createOptions } ); expect( await adapter.isAvailable({ contents: [{ role: 'user', parts: [{ text: 'hi' }] }] }) ).to.be.false; - expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(createStub).to.have.been.calledOnceWith(createOptions); }); it('avoids redundant downloads', async () => { const languageModelProvider = { @@ -310,21 +313,21 @@ describe('ChromeAdapter', () => { ); const promptOutput = 'hi'; const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); - const expectedOnDeviceParams = { + const createOptions = { systemPrompt: 'be yourself', expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - expectedOnDeviceParams + { createOptions } ); const request = { contents: [{ role: 'user', parts: [{ text: 'anything' }] }] } as GenerateContentRequest; const response = await adapter.generateContent(request); // Asserts initialization params are proxied. - expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(createStub).to.have.been.calledOnceWith(createOptions); // Asserts Vertex input type is mapped to Chrome type. 
expect(promptStub).to.have.been.calledOnceWith([ { @@ -356,14 +359,14 @@ describe('ChromeAdapter', () => { ); const promptOutput = 'hi'; const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); - const expectedOnDeviceParams = { + const createOptions = { systemPrompt: 'be yourself', expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - expectedOnDeviceParams + { createOptions } ); const request = { contents: [ @@ -383,7 +386,7 @@ describe('ChromeAdapter', () => { } as GenerateContentRequest; const response = await adapter.generateContent(request); // Asserts initialization params are proxied. - expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(createStub).to.have.been.calledOnceWith(createOptions); // Asserts Vertex input type is mapped to Chrome type. expect(promptStub).to.have.been.calledOnceWith([ { @@ -406,6 +409,40 @@ describe('ChromeAdapter', () => { ] }); }); + it('honors prompt options', async () => { + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + prompt: (p: LanguageModelMessageContent[]) => Promise.resolve('') + } as LanguageModel; + const languageModelProvider = { + create: () => Promise.resolve(languageModel) + } as LanguageModel; + const promptOutput = '{}'; + const promptStub = stub(languageModel, 'prompt').resolves(promptOutput); + const promptOptions = { + responseConstraint: Schema.object({ + properties: {} + }) + }; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + { promptOptions } + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 'anything' }] }] + } as GenerateContentRequest; + await adapter.generateContent(request); + expect(promptStub).to.have.been.calledOnceWith( + [ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ], + promptOptions + ); + }); }); describe('countTokens', () => { it('counts tokens is not yet available', async () => { @@ -462,19 +499,19 @@ describe('ChromeAdapter', () => { } }) ); - const expectedOnDeviceParams = { + const createOptions = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - expectedOnDeviceParams + { createOptions } ); const request = { contents: [{ role: 'user', parts: [{ text: 'anything' }] }] } as GenerateContentRequest; const response = await adapter.generateContentStream(request); - expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(createStub).to.have.been.calledOnceWith(createOptions); expect(promptStub).to.have.been.calledOnceWith([ { type: 'text', @@ -505,13 +542,13 @@ describe('ChromeAdapter', () => { } }) ); - const expectedOnDeviceParams = { + const createOptions = { expectedInputs: [{ type: 'image' }] } as LanguageModelCreateOptions; const adapter = new ChromeAdapter( languageModelProvider, 'prefer_on_device', - expectedOnDeviceParams + { createOptions } ); const request = { contents: [ @@ -530,7 +567,7 @@ describe('ChromeAdapter', () => { ] } as GenerateContentRequest; const response = await adapter.generateContentStream(request); - expect(createStub).to.have.been.calledOnceWith(expectedOnDeviceParams); + expect(createStub).to.have.been.calledOnceWith(createOptions); expect(promptStub).to.have.been.calledOnceWith([ { type: 'text', @@ -546,6 +583,41 @@ describe('ChromeAdapter', () => { `data: 
{"candidates":[{"content":{"role":"model","parts":[{"text":["${part}"]}]}}]}\n\n` ]); }); + it('honors prompt options', async () => { + const languageModel = { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + promptStreaming: p => new ReadableStream() + } as LanguageModel; + const languageModelProvider = { + create: () => Promise.resolve(languageModel) + } as LanguageModel; + const promptStub = stub(languageModel, 'promptStreaming').returns( + new ReadableStream() + ); + const promptOptions = { + responseConstraint: Schema.object({ + properties: {} + }) + }; + const adapter = new ChromeAdapter( + languageModelProvider, + 'prefer_on_device', + { promptOptions } + ); + const request = { + contents: [{ role: 'user', parts: [{ text: 'anything' }] }] + } as GenerateContentRequest; + await adapter.generateContentStream(request); + expect(promptStub).to.have.been.calledOnceWith( + [ + { + type: 'text', + content: request.contents[0].parts[0].text + } + ], + promptOptions + ); + }); }); }); diff --git a/packages/vertexai/src/methods/chrome-adapter.ts b/packages/vertexai/src/methods/chrome-adapter.ts index 9ba674937a8..aa3709048a2 100644 --- a/packages/vertexai/src/methods/chrome-adapter.ts +++ b/packages/vertexai/src/methods/chrome-adapter.ts @@ -22,12 +22,12 @@ import { GenerateContentRequest, InferenceMode, Part, - AIErrorCode + AIErrorCode, + OnDeviceParams } from '../types'; import { Availability, LanguageModel, - LanguageModelCreateOptions, LanguageModelMessageContent } from '../types/language-model'; @@ -44,10 +44,13 @@ export class ChromeAdapter { constructor( private languageModelProvider?: LanguageModel, private mode?: InferenceMode, - private onDeviceParams: LanguageModelCreateOptions = {} - ) { - this.addImageTypeAsExpectedInput(); - } + private onDeviceParams: OnDeviceParams = { + createOptions: { + // Defaults to support image inputs for convenience. + expectedInputs: [{ type: 'image' }] + } + } + ) {} /** * Checks if a given request can be made on-device. 
@@ -111,7 +114,10 @@ export class ChromeAdapter { const contents = await Promise.all( request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) ); - const text = await session.prompt(contents); + const text = await session.prompt( + contents, + this.onDeviceParams.promptOptions + ); return ChromeAdapter.toResponse(text); } @@ -132,7 +138,10 @@ export class ChromeAdapter { const contents = await Promise.all( request.contents[0].parts.map(ChromeAdapter.toLanguageModelMessageContent) ); - const stream = await session.promptStreaming(contents); + const stream = await session.promptStreaming( + contents, + this.onDeviceParams.promptOptions + ); return ChromeAdapter.toStreamResponse(stream); } @@ -187,7 +196,7 @@ export class ChromeAdapter { */ private async downloadIfAvailable(): Promise { const availability = await this.languageModelProvider?.availability( - this.onDeviceParams + this.onDeviceParams.createOptions ); if (availability === Availability.downloadable) { @@ -212,7 +221,7 @@ export class ChromeAdapter { } this.isDownloading = true; this.downloadPromise = this.languageModelProvider - ?.create(this.onDeviceParams) + ?.create(this.onDeviceParams.createOptions) .then(() => { this.isDownloading = false; }); @@ -263,7 +272,7 @@ export class ChromeAdapter { ); } const newSession = await this.languageModelProvider.create( - this.onDeviceParams + this.onDeviceParams.createOptions ); if (this.oldSession) { this.oldSession.destroy(); @@ -273,11 +282,6 @@ export class ChromeAdapter { return newSession; } - private addImageTypeAsExpectedInput(): void { - // Defaults to support image inputs for convenience. - this.onDeviceParams.expectedInputs ??= [{ type: 'image' }]; - } - /** * Formats string returned by Chrome as a {@link Response} returned by Vertex. */ diff --git a/packages/vertexai/src/types/language-model.ts b/packages/vertexai/src/types/language-model.ts index cd84f22dbdb..22916e7ff96 100644 --- a/packages/vertexai/src/types/language-model.ts +++ b/packages/vertexai/src/types/language-model.ts @@ -49,8 +49,9 @@ export interface LanguageModelCreateOptions systemPrompt?: string; initialPrompts?: LanguageModelInitialPrompts; } -interface LanguageModelPromptOptions { - signal?: AbortSignal; +export interface LanguageModelPromptOptions { + responseConstraint?: object; + // TODO: Restore AbortSignal once the API is defined. } interface LanguageModelExpectedInput { type: LanguageModelMessageType; diff --git a/packages/vertexai/src/types/requests.ts b/packages/vertexai/src/types/requests.ts index 36700b5a936..e9d5716e3b4 100644 --- a/packages/vertexai/src/types/requests.ts +++ b/packages/vertexai/src/types/requests.ts @@ -17,7 +17,10 @@ import { TypedSchema } from '../requests/schema-builder'; import { Content, Part } from './content'; -import { LanguageModelCreateOptions } from './language-model'; +import { + LanguageModelCreateOptions, + LanguageModelPromptOptions +} from './language-model'; import { FunctionCallingMode, HarmBlockMethod, @@ -220,6 +223,14 @@ export interface FunctionCallingConfig { allowedFunctionNames?: string[]; } +/** + * Encapsulates configuration for on-device inference. + */ +export interface OnDeviceParams { + createOptions?: LanguageModelCreateOptions; + promptOptions?: LanguageModelPromptOptions; +} + /** * Toggles hybrid inference. */ @@ -231,7 +242,7 @@ export interface HybridParams { /** * Optional. Specifies advanced params for on-device inference. */ - onDeviceParams?: LanguageModelCreateOptions; + onDeviceParams?: OnDeviceParams; /** * Optional. 
Specifies advanced params for in-cloud inference. */
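End-to-end, the public-API effect of the `requests.ts` change is that `HybridParams.onDeviceParams` now nests create options and prompt options instead of taking `LanguageModelCreateOptions` directly. Below is a hedged usage sketch, not part of the patch, mirroring the updated e2e sample earlier in this diff; the Firebase app config is elided and the schema contents are illustrative only.

```typescript
import { initializeApp } from 'firebase/app';
import { getVertexAI, getGenerativeModel, Schema } from 'firebase/vertexai';

async function run(): Promise<void> {
  const app = initializeApp({ /* project config */ });
  const vertexAI = getVertexAI(app);

  const jsonSchema = Schema.object({
    properties: { name: Schema.string(), age: Schema.number() }
  });

  // HybridParams: inference mode plus optional in-cloud and on-device configuration.
  const model = getGenerativeModel(vertexAI, {
    mode: 'prefer_on_device',
    inCloudParams: {
      generationConfig: {
        responseMimeType: 'application/json',
        responseSchema: jsonSchema
      }
    },
    // onDeviceParams is now an OnDeviceParams object, not raw create options.
    onDeviceParams: {
      createOptions: { expectedInputs: [{ type: 'text' }] },
      promptOptions: { responseConstraint: jsonSchema }
    }
  });

  const result = await model.generateContent(
    'Describe a friendly robot as JSON.'
  );
  console.log(JSON.parse(result.response.text()));
}

run();
```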