python_inference_server.py
import json
import sys

from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch  # not used directly, but required as the transformers pipeline backend

print("Loading model...", file=sys.stderr)

# Use a small but real model that works
model_name = "microsoft/DialoGPT-small"

try:
    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Create pipeline
    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
    print("Model loaded successfully!", file=sys.stderr)

    # Read newline-delimited JSON requests from stdin
    while True:
        try:
            line = input()
            data = json.loads(line)
            prompt = data.get('prompt', '')

            # Generate a response; max_new_tokens caps the generated text,
            # matching the 'maxTokens' request field (max_length would also
            # count the prompt tokens)
            result = generator(
                prompt,
                max_new_tokens=data.get('maxTokens', 100),
                temperature=data.get('temperature', 0.7),
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )
            response_text = result[0]['generated_text']

            # Remove the input prompt from the response
            if response_text.startswith(prompt):
                response_text = response_text[len(prompt):].strip()

            # Send the response as one JSON object per line
            print(json.dumps({
                'response': response_text,
                'model': model_name
            }))
            sys.stdout.flush()
        except EOFError:
            break
        except Exception as e:
            # Report per-request errors as JSON so the caller can parse them
            print(json.dumps({'error': str(e)}))
            sys.stdout.flush()
except Exception as e:
    print(f"Failed to load model: {e}", file=sys.stderr)
    sys.exit(1)
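
# The server speaks a line-delimited JSON protocol: one request object per
# line on stdin, one response object per line on stdout. Below is a minimal
# client sketch showing how a caller might drive it; the filename and the
# request fields ('prompt', 'maxTokens', 'temperature') come from the script
# above, while the client itself is hypothetical and assumes the server is
# launched as a plain "python python_inference_server.py" subprocess.
#
#     import json
#     import subprocess
#
#     # Start the server as a child process with pipes for stdin/stdout
#     proc = subprocess.Popen(
#         ["python", "python_inference_server.py"],
#         stdin=subprocess.PIPE,
#         stdout=subprocess.PIPE,
#         text=True,
#     )
#
#     # One request per line, matching the fields the server reads
#     request = {"prompt": "Hello, how are you?", "maxTokens": 50, "temperature": 0.7}
#     proc.stdin.write(json.dumps(request) + "\n")
#     proc.stdin.flush()
#
#     # The server replies with one JSON object per line on stdout;
#     # per-request failures arrive as {"error": ...} on the same channel
#     reply = json.loads(proc.stdout.readline())
#     print(reply.get("response", reply.get("error")))
#
#     # Closing stdin raises EOFError in the server's input() loop,
#     # which is how it shuts down cleanly
#     proc.stdin.close()
#     proc.wait()
#
# Keeping errors on stdout (as JSON) and diagnostics on stderr means the
# client only ever has to parse one machine-readable stream.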