cli.py
import argparse
from typing import Literal, TypedDict


class Args(TypedDict, total=False):
    """Parsed server command-line arguments."""

    device: Literal["cpu", "cuda"]
    backend: Literal["onnx", "llama_cpp"]
    model_path: str
    tokenizer_path: str
    batch_size: int
    sessions: int


def init_server_args() -> Args:
    """Parse the command-line arguments for the inference server."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d",
        "--device",
        dest="device",
        action="store",
        default="cpu",
        choices=["cpu", "cuda"],
        help="Device to use for inference. (cpu/cuda, default: cpu)",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        dest="batch_size",
        action="store",
        default=1,
        type=int,
        help="Batch size for inference. (default: 1)",
    )
    parser.add_argument(
        "-f",
        "--backend",
        dest="backend",
        action="store",
        default="onnx",
        choices=["onnx", "llama_cpp"],
        help="Inference backend to use. (onnx/llama_cpp, default: onnx)",
    )
    parser.add_argument(
        "-s",
        "--sessions",
        dest="sessions",
        action="store",
        default=1,
        type=int,
        help="Number of session instances for parallel processing. (default: 1)",
    )
    parser.add_argument(
        "-m",
        "--model-path",
        dest="model_path",
        action="store",
        type=str,
        help="Path to the model file. Required for model inference.",
    )
    parser.add_argument(
        "-t",
        "--tokenizer-path",
        dest="tokenizer_path",
        action="store",
        type=str,
        help="Path to the tokenizer file.",
    )
    args = parser.parse_args()
    # Convert the argparse.Namespace into the typed Args dict.
    return Args(**vars(args))
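

# Minimal usage sketch (not part of the original file): shows how a server
# entry point might consume the parsed arguments. The printout is illustrative
# only; any real server wiring is an assumption.
if __name__ == "__main__":
    parsed = init_server_args()
    print(f"device={parsed.get('device')}, backend={parsed.get('backend')}, "
          f"batch_size={parsed.get('batch_size')}, sessions={parsed.get('sessions')}")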