Skip to content

Commit

Permalink
ff-174 Custom API entry points for OpenAI and Gemini clients. (#304)
Browse files Browse the repository at this point in the history
  • Loading branch information
Tiendil authored Dec 16, 2024
1 parent 2951b31 commit 6e92026
Show file tree
Hide file tree
Showing 4 changed files with 37 additions and 3 deletions.
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,17 @@ You can set the API key for collections in the processor's config.

**DANGER!!!** You can set the "general API key" in the processor's config; in this case, the processor will use it to process **ALL** news. It may be convenient if you self-host the service and fully control who has access to it.

#### Specify API entry points

You can set custom URLs as entry points for the OpenAI and Gemini APIs by setting the following environment variables:

```
FFUN_OPENAI_API_ENTRY_POINT="<your url>"
FFUN_GOOGLE_GEMINI_API_ENTRY_POINT="<your url>"
```

That will allow you to use any compatible API provider.

## Backend

```
Expand Down
2 changes: 1 addition & 1 deletion changes/unreleased.md
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@

No changes.
- ff-174 — Custom API entry points for OpenAI and Gemini clients.
9 changes: 7 additions & 2 deletions ffun/ffun/openai/provider_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from ffun.llms_framework.entities import KeyStatus, LLMConfiguration, LLMProvider
from ffun.llms_framework.keys_statuses import Statuses
from ffun.llms_framework.provider_interface import ChatRequest, ChatResponse, ProviderInterface
from ffun.openai.settings import settings

logger = logging.get_module_logger()

Expand All @@ -20,6 +21,10 @@ class OpenAIChatRequest(ChatRequest):
messages: list[ChatCompletionMessageParam]


def _client(api_key: str) -> openai.AsyncOpenAI:
    """Construct an AsyncOpenAI client for the given key.

    Honors the module settings: ``api_entry_point`` overrides the default
    base URL (``None`` keeps the library default) and ``api_timeout`` caps
    request duration.
    """
    client = openai.AsyncOpenAI(
        api_key=api_key,
        base_url=settings.api_entry_point,
        timeout=settings.api_timeout,
    )
    return client

class OpenAIChatResponse(ChatResponse):
content: str
prompt_tokens: int
Expand Down Expand Up @@ -82,7 +87,7 @@ async def chat_request( # type: ignore
) -> OpenAIChatResponse:
try:
with track_key_status(api_key, self.api_keys_statuses):
answer = await openai.AsyncOpenAI(api_key=api_key).chat.completions.create(
answer = await _client(api_key=api_key).chat.completions.create(
model=config.model,
temperature=float(config.temperature),
max_tokens=config.max_return_tokens,
Expand Down Expand Up @@ -127,7 +132,7 @@ def prepare_requests(self, config: LLMConfiguration, text: str) -> Sequence[Open
async def check_api_key(self, config: LLMConfiguration, api_key: str) -> KeyStatus:
with track_key_status(api_key, self.api_keys_statuses):
try:
await openai.AsyncOpenAI(api_key=api_key).models.list()
await _client(api_key=api_key).models.list()
except openai.APIError:
pass

Expand Down
18 changes: 18 additions & 0 deletions ffun/ffun/openai/settings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import pathlib

import pydantic_settings

from ffun.core.settings import BaseSettings

# Directory containing this module. NOTE(review): appears unused in this
# file — confirm no external consumer before removing.
_root = pathlib.Path(__file__).parent


class Settings(BaseSettings):
    """OpenAI client settings, loaded from ``FFUN_OPENAI_*`` environment variables."""

    # Custom base URL for an OpenAI-compatible API; None keeps the client's default.
    api_entry_point: str | None = None
    # Request timeout in seconds passed to the OpenAI client.
    api_timeout: float = 20.0

    model_config = pydantic_settings.SettingsConfigDict(env_prefix="FFUN_OPENAI_")


# Module-level singleton; environment variables are read once at import time.
settings = Settings()

0 comments on commit 6e92026

Please sign in to comment.