-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathopenapi.yaml
More file actions
341 lines (329 loc) · 9.93 KB
/
openapi.yaml
File metadata and controls
341 lines (329 loc) · 9.93 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
openapi: 3.0.3
info:
  title: Deterministic LLM Gateway
  description: |
    Production-grade HTTP gateway for LLM providers (OpenAI, Anthropic) with
    reliability controls: retries, circuit breakers, rate limiting, idempotency,
    structured logging, and schema validation.

    **Features:**
    - ⚡ Exponential backoff retries
    - 🔌 Circuit breakers (Opossum)
    - 🚦 Rate limiting per IP
    - 🔑 Idempotency key support
    - 📊 Pino structured logging
    - ✅ Zod schema validation
    - 🛡️ Configurable timeouts
  version: 0.1.0
  contact:
    name: maxpetrusenko
    url: https://github.com/maxpetrusenko/deterministic-llm-service
  license:
    name: MIT
servers:
  - url: http://localhost:3000
    description: Local development server
  - url: https://your-production-gateway.com
    description: Production server
tags:
  - name: health
    description: Health check endpoints
  - name: chat
    description: Chat completion endpoints
paths:
  /health:
    get:
      tags: [health]
      summary: Health check
      description: Returns the health status of the gateway
      operationId: healthCheck
      responses:
        '200':
          description: Gateway is healthy
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/HealthResponse'
              example:
                status: healthy
                timestamp: '2025-02-18T19:00:00.000Z'
                uptime: 1234.567
                requestId: '550e8400-e29b-41d4-a716-446655440000'
  /v1/chat/completions:
    post:
      tags: [chat]
      summary: Create chat completion
      description: |
        Creates a model response for the given chat conversation.
        Supports both OpenAI and Anthropic providers via unified interface.
      operationId: createChatCompletion
      parameters:
        - name: X-Request-Id
          in: header
          description: Unique request identifier for tracing. Auto-generated if not provided.
          schema:
            type: string
            format: uuid
          example: '550e8400-e29b-41d4-a716-446655440000'
        - name: X-Idempotency-Key
          in: header
          description: |
            Key to prevent duplicate processing. Responses are cached for 1 hour.
            If the same key is sent again, the cached response is returned.
          schema:
            type: string
          example: 'my-request-123'
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ChatCompletionRequest'
            examples:
              simple:
                summary: Simple user message
                value:
                  model: gpt-4o-mini
                  messages:
                    - role: user
                      content: Hello, how are you?
              withSystem:
                summary: With system prompt
                value:
                  model: gpt-4o-mini
                  messages:
                    - role: system
                      content: You are a helpful assistant.
                    - role: user
                      content: Explain quantum computing.
              anthropic:
                summary: Anthropic provider
                value:
                  model: claude-3-haiku-20240307
                  provider: anthropic
                  messages:
                    - role: user
                      content: What is the capital of France?
      responses:
        '200':
          description: Successful completion
          headers:
            X-Request-Id:
              description: Request identifier
              schema:
                type: string
            X-Cached:
              description: 'true if response was served from idempotency cache'
              schema:
                type: boolean
            X-RateLimit-Limit:
              description: Requests per rate limit window
              schema:
                type: integer
            X-RateLimit-Remaining:
              description: Remaining requests in current window
              schema:
                type: integer
            X-RateLimit-Reset:
              description: ISO timestamp when rate limit resets
              schema:
                type: string
                format: date-time
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ChatCompletionResponse'
              example:
                id: 'chatcmpl-123'
                content: 'Hello! I am doing well, thank you for asking.'
                model: gpt-4o-mini
                finishReason: stop
                usage:
                  promptTokens: 10
                  completionTokens: 9
                  totalTokens: 19
        '400':
          description: Invalid request (validation error)
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorResponse'
              example:
                error: Validation error
                details:
                  - code: invalid_type
                    expected: string
                    received: undefined
                    path: [model]
                    message: Required
        '429':
          description: Rate limit exceeded
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RateLimitError'
              example:
                error: Too many requests
                retryAfter: 45
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorResponse'
              example:
                error: Internal server error
                requestId: '550e8400-e29b-41d4-a716-446655440000'
components:
  schemas:
    Message:
      type: object
      required:
        - role
        - content
      properties:
        role:
          type: string
          enum: [system, user, assistant]
          description: The role of the message author
        content:
          type: string
          description: The content of the message
    ChatCompletionRequest:
      type: object
      required:
        - model
        - messages
      properties:
        model:
          type: string
          description: |
            Model identifier (e.g., 'gpt-4o-mini', 'claude-3-haiku-20240307').
            The model must be supported by the selected provider.
          example: gpt-4o-mini
        messages:
          type: array
          minItems: 1
          items:
            $ref: '#/components/schemas/Message'
          description: Array of messages in the conversation
        temperature:
          type: number
          minimum: 0
          maximum: 2
          description: Sampling temperature (0 = deterministic, 2 = very random)
          example: 0.7
        maxTokens:
          type: number
          minimum: 1
          description: Maximum tokens to generate in the completion
          example: 1000
        provider:
          type: string
          enum: [openai, anthropic]
          description: |
            LLM provider to use. Defaults to 'openai' or DEFAULT_PROVIDER env var.
          example: openai
        timeout:
          type: number
          minimum: 1
          default: 30000
          description: Request timeout in milliseconds
          example: 30000
    ChatCompletionResponse:
      type: object
      required:
        - id
        - content
        - model
        - finishReason
        - usage
      properties:
        id:
          type: string
          description: Unique completion identifier
        content:
          type: string
          description: The generated message content
        model:
          type: string
          description: The model used for the completion
        finishReason:
          type: string
          enum: [stop, length, content_filter]
          description: Reason the completion finished
        usage:
          type: object
          required:
            - promptTokens
            - completionTokens
            - totalTokens
          properties:
            promptTokens:
              type: integer
              description: Tokens in the prompt
            completionTokens:
              type: integer
              description: Tokens in the completion
            totalTokens:
              type: integer
              description: Total tokens used
    HealthResponse:
      type: object
      required:
        - status
        - timestamp
        - uptime
        - requestId
      properties:
        status:
          type: string
          enum: [healthy]
          description: Health status
        timestamp:
          type: string
          format: date-time
          description: Current server time
        uptime:
          type: number
          format: float
          description: Server uptime in seconds
        requestId:
          type: string
          format: uuid
          description: Request identifier
    ErrorResponse:
      type: object
      required:
        - error
      properties:
        error:
          type: string
          description: Error message
        requestId:
          type: string
          format: uuid
          description: Request identifier for debugging
        details:
          type: array
          description: Detailed validation errors (for Zod validation failures)
          items:
            type: object
    RateLimitError:
      type: object
      required:
        - error
        - retryAfter
      properties:
        error:
          type: string
          example: Too many requests
        retryAfter:
          type: integer
          description: Seconds until rate limit resets
  securitySchemes:
    BearerAuth:
      type: http
      scheme: bearer
      description: |
        API authentication (not yet implemented - placeholder for future)
        Current implementation uses environment variables for provider keys.