diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts
index e338559be7e..80c1e180faf 100644
--- a/packages/opencode/src/agent/agent.ts
+++ b/packages/opencode/src/agent/agent.ts
@@ -283,6 +283,7 @@ export namespace Agent {
     const cfg = await Config.get()
     const defaultModel = input.model ?? (await Provider.defaultModel())
     const model = await Provider.getModel(defaultModel.providerID, defaultModel.modelID)
+    const provider = await Provider.getProvider(defaultModel.providerID)
     const language = await Provider.getLanguage(model)
 
     const system = [PROMPT_GENERATE]
@@ -317,22 +318,33 @@ export namespace Agent {
       }),
     } satisfies Parameters<typeof generateObject>[0]
 
+    const ctx = {
+      model,
+      provider,
+    }
+
     if (defaultModel.providerID === "openai" && (await Auth.get(defaultModel.providerID))?.type === "oauth") {
-      const result = streamObject({
+      const call = {
         ...params,
         providerOptions: ProviderTransform.providerOptions(model, {
           instructions: SystemPrompt.instructions(),
           store: false,
         }),
         onError: () => {},
-      })
+      }
+      await Plugin.trigger("llm.request.before", { ...ctx, type: "stream" }, { params: call })
+      const result = streamObject(call)
       for await (const part of result.fullStream) {
+        await Plugin.trigger("llm.stream.chunk", { ...ctx, type: "stream" }, { part })
         if (part.type === "error") throw part.error
       }
+      await Plugin.trigger("llm.response.after", { ...ctx, type: "stream" }, { result: result.object })
       return result.object
     }
 
+    await Plugin.trigger("llm.request.before", { ...ctx, type: "generate" }, { params })
     const result = await generateObject(params)
+    await Plugin.trigger("llm.response.after", { ...ctx, type: "generate" }, { result: result.object })
     return result.object
   }
 }
diff --git a/packages/opencode/src/session/llm.ts b/packages/opencode/src/session/llm.ts
index 4be6e2538f7..b8ec2047ee3 100644
--- a/packages/opencode/src/session/llm.ts
+++ b/packages/opencode/src/session/llm.ts
@@ -180,7 +180,17 @@ export namespace LLM {
       })
     }
 
-    return streamText({
+    const ctx = {
+      sessionID: input.sessionID,
+      agent: input.agent,
+      model: input.model,
+      provider,
+      message: input.user,
+      requestID: input.user.id,
+      type: "stream",
+    }
+
+    const call = {
       onError(error) {
         l.error("stream error", {
           error,
@@ -262,7 +272,20 @@
           sessionId: input.sessionID,
         },
       },
-    })
+    }
+
+    await Plugin.trigger("llm.request.before", ctx, { params: call })
+
+    const stream = await streamText(call)
+    const full = stream.fullStream
+    const fullStream = (async function* () {
+      for await (const part of full) {
+        await Plugin.trigger("llm.stream.chunk", ctx, { part })
+        yield part
+      }
+    })()
+
+    return { ...stream, fullStream }
   }
 
   async function resolveTools(input: Pick) {
diff --git a/packages/plugin/src/index.ts b/packages/plugin/src/index.ts
index 4cc84a5f325..f4857d9bf0d 100644
--- a/packages/plugin/src/index.ts
+++ b/packages/plugin/src/index.ts
@@ -23,6 +23,18 @@ export type ProviderContext = {
   options: Record<string, any>
 }
 
+export type LlmPhase = "generate" | "stream"
+
+export type LlmContext = {
+  sessionID?: string
+  agent?: unknown
+  model?: Model
+  provider?: unknown
+  message?: UserMessage
+  requestID?: string
+  type: LlmPhase
+}
+
 export type PluginInput = {
   client: ReturnType<typeof createOpencodeClient>
   project: Project
@@ -176,6 +188,18 @@ export interface Hooks {
     input: { sessionID: string; agent: string; model: Model; provider: ProviderContext; message: UserMessage },
     output: { headers: Record<string, string> },
   ) => Promise<void>
+  /**
+   * Last hook before the request is sent to the provider.
+   */
+  "llm.request.before"?: (input: LlmContext, output: { params: Record<string, any> }) => Promise<void>
+  /**
+   * First hook after a completed response is received.
+   */
+  "llm.response.after"?: (input: LlmContext, output: { result: unknown }) => Promise<void>
+  /**
+   * Called for each stream chunk before opencode processes it.
+   */
+  "llm.stream.chunk"?: (input: LlmContext, output: { part: unknown }) => Promise<void>
   "permission.ask"?: (input: Permission, output: { status: "ask" | "deny" | "allow" }) => Promise<void>
   "command.execute.before"?: (
     input: { command: string; sessionID: string; arguments: string },
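
Usage sketch (not part of the patch): a minimal plugin exercising the three new hooks. This assumes the usual factory shape exported by @opencode-ai/plugin, i.e. (input: PluginInput) => Promise<Hooks>; the plugin name and the console logging are illustrative only.

  import type { Plugin } from "@opencode-ai/plugin"

  // Hypothetical trace plugin: observes each LLM round trip via the new hooks.
  export const LlmTracePlugin: Plugin = async () => {
    return {
      "llm.request.before": async (input, output) => {
        // Last chance to inspect (or mutate) the provider call params.
        console.log(`llm ${input.type} request`, Object.keys(output.params))
      },
      "llm.stream.chunk": async (input, output) => {
        // Runs once per stream part, before opencode processes it.
        const part = output.part as { type?: string }
        if (part.type === "error") console.warn("stream error part", part)
      },
      "llm.response.after": async (input, output) => {
        // Receives the final object of a generate or object-stream call.
        console.log(`llm ${input.type} response`, output.result)
      },
    }
  }

One design note: because session/llm.ts awaits "llm.stream.chunk" for every part before yielding it, hook implementations should stay cheap or they will backpressure the visible stream.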