diff --git a/agentscope-core/src/main/java/io/agentscope/core/model/GeminiChatModel.java b/agentscope-core/src/main/java/io/agentscope/core/model/GeminiChatModel.java
index ba35cc066..92fc1d645 100644
--- a/agentscope-core/src/main/java/io/agentscope/core/model/GeminiChatModel.java
+++ b/agentscope-core/src/main/java/io/agentscope/core/model/GeminiChatModel.java
@@ -74,6 +74,7 @@ public class GeminiChatModel extends ChatModelBase {
* Creates a new Gemini chat model instance.
*
* @param apiKey the API key for authentication (for Gemini API)
+ * @param baseUrl the custom base URL for Gemini API (null for default)
* @param modelName the model name to use (e.g., "gemini-2.0-flash",
* "gemini-1.5-pro")
* @param streamEnabled whether streaming should be enabled
@@ -90,6 +91,7 @@ public class GeminiChatModel extends ChatModelBase {
*/
public GeminiChatModel(
String apiKey,
+ String baseUrl,
String modelName,
boolean streamEnabled,
String project,
@@ -106,7 +108,7 @@ public GeminiChatModel(
this.project = project;
this.location = location;
this.vertexAI = vertexAI;
- this.httpOptions = httpOptions;
+ this.httpOptions = resolveHttpOptions(baseUrl, httpOptions);
this.credentials = credentials;
this.clientOptions = clientOptions;
this.defaultOptions =
@@ -136,8 +138,8 @@ public GeminiChatModel(
}
// Configure HTTP and client options
- if (httpOptions != null) {
- clientBuilder.httpOptions(httpOptions);
+ if (this.httpOptions != null) {
+ clientBuilder.httpOptions(this.httpOptions);
}
if (clientOptions != null) {
clientBuilder.clientOptions(clientOptions);
@@ -146,6 +148,65 @@ public GeminiChatModel(
this.client = clientBuilder.build();
}
+ /**
+ * Creates a new Gemini chat model instance using the default endpoint configuration.
+ *
+ *
+ * <p>This overload passes {@code null} for {@code baseUrl}, allowing the SDK to use its
+ * default endpoint behavior for either Gemini API or Vertex AI, depending on the provided
+ * configuration.
+ *
+ * @param apiKey the API key for authentication (for Gemini API)
+ * @param modelName the model name to use (e.g., "gemini-2.0-flash",
+ * "gemini-1.5-pro")
+ * @param streamEnabled whether streaming should be enabled
+ * @param project the Google Cloud project ID (for Vertex AI)
+ * @param location the Google Cloud location (for Vertex AI, e.g.,
+ * "us-central1")
+ * @param vertexAI whether to use Vertex AI APIs (null for auto-detection)
+ * @param httpOptions HTTP options for the client
+ * @param credentials Google credentials (for Vertex AI)
+ * @param clientOptions client options for the API client
+ * @param defaultOptions default generation options
+ * @param formatter the message formatter to use (null for default Gemini
+ * formatter)
+ */
+ public GeminiChatModel(
+ String apiKey,
+ String modelName,
+ boolean streamEnabled,
+ String project,
+ String location,
+ Boolean vertexAI,
+ HttpOptions httpOptions,
+ GoogleCredentials credentials,
+ ClientOptions clientOptions,
+ GenerateOptions defaultOptions,
+ Formatter formatter) {
+ this(
+ apiKey,
+ null,
+ modelName,
+ streamEnabled,
+ project,
+ location,
+ vertexAI,
+ httpOptions,
+ credentials,
+ clientOptions,
+ defaultOptions,
+ formatter);
+ }
+
+ private static HttpOptions resolveHttpOptions(String baseUrl, HttpOptions httpOptions) {
+ if (baseUrl == null) {
+ return httpOptions;
+ }
+ if (httpOptions == null) {
+ return HttpOptions.builder().baseUrl(baseUrl).build();
+ }
+ return httpOptions.toBuilder().baseUrl(baseUrl).build();
+ }
+
/**
* Stream chat completion responses from Gemini's API.
*
@@ -281,6 +342,7 @@ public static Builder builder() {
*/
public static class Builder {
private String apiKey;
+ private String baseUrl;
private String modelName = "gemini-2.5-flash";
private boolean streamEnabled = true;
private String project;
@@ -304,6 +366,17 @@ public Builder apiKey(String apiKey) {
return this;
}
+ /**
+ * Sets the custom base URL (for Gemini API).
+ *
+ * @param baseUrl the custom Gemini API base URL
+ * @return this builder
+ */
+ public Builder baseUrl(String baseUrl) {
+ this.baseUrl = baseUrl;
+ return this;
+ }
+
/**
* Sets the model name.
*
@@ -424,6 +497,7 @@ public Builder formatter(
public GeminiChatModel build() {
return new GeminiChatModel(
apiKey,
+ baseUrl,
modelName,
streamEnabled,
project,
diff --git a/agentscope-core/src/test/java/io/agentscope/core/model/GeminiChatModelTest.java b/agentscope-core/src/test/java/io/agentscope/core/model/GeminiChatModelTest.java
index f1b473a29..50e9b48e0 100644
--- a/agentscope-core/src/test/java/io/agentscope/core/model/GeminiChatModelTest.java
+++ b/agentscope-core/src/test/java/io/agentscope/core/model/GeminiChatModelTest.java
@@ -23,6 +23,7 @@
import io.agentscope.core.formatter.gemini.GeminiChatFormatter;
import io.agentscope.core.formatter.gemini.GeminiMultiAgentFormatter;
import io.agentscope.core.model.test.ModelTestUtils;
+import java.lang.reflect.Field;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
@@ -300,6 +301,49 @@ void testHttpOptionsConfiguration() {
assertNotNull(modelWithHttpOptions);
}
+ @Test
+ @DisplayName("Should configure custom base URL")
+ void testBaseUrlConfiguration() throws Exception {
+ String baseUrl = "https://custom-gemini-endpoint.example";
+
+ GeminiChatModel model =
+ GeminiChatModel.builder()
+ .apiKey(mockApiKey)
+ .modelName("gemini-2.0-flash")
+ .baseUrl(baseUrl)
+ .build();
+
+ assertNotNull(model);
+ assertEquals(baseUrl, getHttpOptions(model).baseUrl().orElseThrow());
+ }
+
+ @Test
+ @DisplayName("Should override HTTP options base URL while preserving other settings")
+ void testBaseUrlOverridesHttpOptionsBaseUrl() throws Exception {
+ HttpOptions httpOptions =
+ HttpOptions.builder()
+ .baseUrl("https://original-gemini-endpoint.example")
+ .apiVersion("v1beta")
+ .timeout(3210)
+ .build();
+
+ GeminiChatModel model =
+ GeminiChatModel.builder()
+ .apiKey(mockApiKey)
+ .modelName("gemini-2.0-flash")
+ .httpOptions(httpOptions)
+ .baseUrl("https://override-gemini-endpoint.example")
+ .build();
+
+ HttpOptions effectiveHttpOptions = getHttpOptions(model);
+ assertNotNull(effectiveHttpOptions);
+ assertEquals(
+ "https://override-gemini-endpoint.example",
+ effectiveHttpOptions.baseUrl().orElseThrow());
+ assertEquals("v1beta", effectiveHttpOptions.apiVersion().orElseThrow());
+ assertEquals(3210, effectiveHttpOptions.timeout().orElseThrow());
+ }
+
@Test
@DisplayName("Should handle all generation options")
void testAllGenerateOptions() {
@@ -454,4 +498,10 @@ void testNullFormatter() {
assertNotNull(model);
// Should use default GeminiChatFormatter
}
+
+ private static HttpOptions getHttpOptions(GeminiChatModel model) throws Exception {
+ Field httpOptionsField = GeminiChatModel.class.getDeclaredField("httpOptions");
+ httpOptionsField.setAccessible(true);
+ return (HttpOptions) httpOptionsField.get(model);
+ }
}
diff --git a/docs/en/task/model.md b/docs/en/task/model.md
index 8113c1bb4..37b061b5b 100644
--- a/docs/en/task/model.md
+++ b/docs/en/task/model.md
@@ -136,6 +136,7 @@ Google's Gemini series models, supporting both Gemini API and Vertex AI.
GeminiChatModel model = GeminiChatModel.builder()
.apiKey(System.getenv("GEMINI_API_KEY"))
.modelName("gemini-2.5-flash") // Default
+ .baseUrl("https://your-gateway.example") // Optional
.build();
```
@@ -156,6 +157,7 @@ GeminiChatModel model = GeminiChatModel.builder()
| Option | Description |
|--------|-------------|
| `apiKey` | Gemini API key |
+| `baseUrl` | Custom Gemini API endpoint (optional) |
| `modelName` | Model name, default `gemini-2.5-flash` |
| `project` | GCP project ID (Vertex AI) |
| `location` | GCP region (Vertex AI) |
@@ -163,6 +165,8 @@ GeminiChatModel model = GeminiChatModel.builder()
| `credentials` | GCP credentials (Vertex AI) |
| `streamEnabled` | Enable streaming, default `true` |
+For endpoint override, use `baseUrl(...)`. For more advanced transport or proxy setup, continue to use `httpOptions(...)` or `clientOptions(...)`.
+
## Ollama
Self-hosted open-source LLM platform supporting various models.
@@ -419,4 +423,4 @@ DashScopeChatModel model = DashScopeChatModel.builder()
| Single-agent conversation | `ChatFormatter` (default) |
| Pipeline sequential execution | `MultiAgentFormatter` |
| MsgHub group chat | `MultiAgentFormatter` |
-| Multi-agent debate | `MultiAgentFormatter` |
\ No newline at end of file
+| Multi-agent debate | `MultiAgentFormatter` |
diff --git a/docs/zh/task/model.md b/docs/zh/task/model.md
index a273f588a..7b21d71c1 100644
--- a/docs/zh/task/model.md
+++ b/docs/zh/task/model.md
@@ -133,6 +133,7 @@ Google 的 Gemini 系列模型,支持 Gemini API 和 Vertex AI。
GeminiChatModel model = GeminiChatModel.builder()
.apiKey(System.getenv("GEMINI_API_KEY"))
.modelName("gemini-2.5-flash") // 默认值
+ .baseUrl("https://your-gateway.example") // 可选
.build();
```
@@ -153,6 +154,7 @@ GeminiChatModel model = GeminiChatModel.builder()
| 配置项 | 说明 |
|--------|------|
| `apiKey` | Gemini API 密钥 |
+| `baseUrl` | 自定义 Gemini API 端点(可选) |
| `modelName` | 模型名称,默认 `gemini-2.5-flash` |
| `project` | GCP 项目 ID(Vertex AI) |
| `location` | GCP 区域(Vertex AI) |
@@ -160,6 +162,8 @@ GeminiChatModel model = GeminiChatModel.builder()
| `credentials` | GCP 凭证(Vertex AI) |
| `streamEnabled` | 是否启用流式输出,默认 `true` |
+如需覆盖请求端点,可使用 `baseUrl(...)`。更高级的传输层或代理配置,仍建议通过 `httpOptions(...)` 或 `clientOptions(...)` 处理。
+
## Ollama
自托管开源 LLM 平台,支持多种模型。
@@ -415,4 +419,4 @@ DashScopeChatModel model = DashScopeChatModel.builder()
| 单智能体对话 | `ChatFormatter`(默认) |
| Pipeline 顺序执行 | `MultiAgentFormatter` |
| MsgHub 群聊 | `MultiAgentFormatter` |
-| 多智能体辩论 | `MultiAgentFormatter` |
\ No newline at end of file
+| 多智能体辩论 | `MultiAgentFormatter` |