diff --git a/gallery/hermes-vllm.yaml b/gallery/hermes-vllm.yaml
new file mode 100644
index 00000000000..80277da09aa
--- /dev/null
+++ b/gallery/hermes-vllm.yaml
@@ -0,0 +1,91 @@
+---
+name: "hermes-vllm"
+
+config_file: |
+  backend: vllm
+  context_size: 8192
+  stopwords:
+  - "<|im_end|>"
+  - "<dummy32000>"
+  - "<|eot_id|>"
+  - "<|end_of_text|>"
+  function:
+    disable_no_action: true
+    grammar:
+      # Uncomment the line below to enable grammar matching for JSON results if the model is breaking
+      # the output. This will make the model more accurate and won't break the JSON output.
+      # This however, will make parallel_calls not functional (it is a known bug)
+      # mixed_mode: true
+      disable: true
+      parallel_calls: true
+      expect_strings_after_json: true
+    json_regex_match:
+    - "(?s)<tool_call>(.*?)</tool_call>"
+    - "(?s)<tool_call>(.*)"
+    capture_llm_results:
+    - (?s)<scratchpad>(.*?)</scratchpad>
+    replace_llm_results:
+    - key: (?s)<scratchpad>(.*?)</scratchpad>
+      value: ""
+
+  template:
+    use_tokenizer_template: true
+    chat: |
+      {{.Input -}}
+      <|im_start|>assistant
+    chat_message: |
+      <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
+      {{- if .FunctionCall }}
+      <tool_call>
+      {{- else if eq .RoleName "tool" }}
+      <tool_response>
+      {{- end }}
+      {{- if .Content}}
+      {{.Content }}
+      {{- end }}
+      {{- if .FunctionCall}}
+      {{toJson .FunctionCall}}
+      {{- end }}
+      {{- if .FunctionCall }}
+      </tool_call>
+      {{- else if eq .RoleName "tool" }}
+      </tool_response>
+      {{- end }}<|im_end|>
+    completion: |
+      {{.Input}}
+    function: |
+      <|im_start|>system
+      You are a function calling AI model.
+      Here are the available tools:
+      <tools>
+      {{range .Functions}}
+      {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
+      {{end}}
+      </tools>
+      You should call the tools provided to you sequentially
+      Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
+      <scratchpad>
+      {step-by-step reasoning and plan in bullet points}
+      </scratchpad>
+      For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
+      <tool_call>
+      {"arguments": <args-dict>, "name": <function-name>}
+      </tool_call><|im_end|>
+      {{.Input -}}
+      <|im_start|>assistant
+# Uncomment to specify a quantization method (optional)
+# quantization: "awq"
+# Uncomment to limit the GPU memory utilization (vLLM default is 0.9 for 90%)
+# gpu_memory_utilization: 0.5
+# Uncomment to trust remote code from huggingface
+# trust_remote_code: true
+# Uncomment to enable eager execution
+# enforce_eager: true
+# Uncomment to specify the size of the CPU swap space per GPU (in GiB)
+# swap_space: 2
+# Uncomment to specify the maximum length of a sequence (including prompt and output)
+# max_model_len: 32768
+# Uncomment and specify the number of Tensor divisions.
+# Allows you to partition and run large models. Performance gains are limited.
+# https://github.com/vllm-project/vllm/issues/1435
+# tensor_parallel_size: 2
diff --git a/gallery/index.yaml b/gallery/index.yaml
index 2a10723bad9..f34a09c4663 100644
--- a/gallery/index.yaml
+++ b/gallery/index.yaml
@@ -4752,6 +4752,38 @@
     - filename: Hermes-3-Llama-3.1-70B.Q4_K_M.gguf
       sha256: 955c2f42caade4278f3c9dbffa32bb74572652b20e49e5340e782de3585bbe3f
       uri: huggingface://NousResearch/Hermes-3-Llama-3.1-70B-GGUF/Hermes-3-Llama-3.1-70B.Q4_K_M.gguf
+- &hermes-vllm
+  url: "github:mudler/LocalAI/gallery/hermes-vllm.yaml@master"
+  name: "hermes-3-llama-3.1-8b:vllm"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/vG6j5WxHX09yj32vgjJlI.jpeg
+  tags:
+    - llm
+    - vllm
+    - gpu
+    - function-calling
+  license: llama-3
+  urls:
+    - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B
+  description: |
+    Hermes 3 is a generalist language model with many improvements over Hermes 2, including advanced agentic capabilities, much better roleplaying, reasoning, multi-turn conversation, long context coherence, and improvements across the board. It is designed to focus on aligning LLMs to the user, with powerful steering capabilities and control given to the end user. The model uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. It also supports function calling and structured output capabilities, generalist assistant capabilities, and improved code generation skills.
+  overrides:
+    parameters:
+      model: NousResearch/Hermes-3-Llama-3.1-8B
+- !!merge <<: *hermes-vllm
+  name: "hermes-3-llama-3.1-70b:vllm"
+  urls:
+    - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-70B
+  overrides:
+    parameters:
+      model: NousResearch/Hermes-3-Llama-3.1-70B
+- !!merge <<: *hermes-vllm
+  name: "hermes-3-llama-3.1-405b:vllm"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/-kj_KflXsdpcZoTQsvx7W.jpeg
+  urls:
+    - https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-405B
+  overrides:
+    parameters:
+      model: NousResearch/Hermes-3-Llama-3.1-405B
 - !!merge <<: *hermes-2-pro-mistral
   name: "biomistral-7b"
   description: |
diff --git a/gallery/vllm.yaml b/gallery/vllm.yaml
new file mode 100644
index 00000000000..d36ea96de47
--- /dev/null
+++ b/gallery/vllm.yaml
@@ -0,0 +1,29 @@
+---
+name: "vllm"
+
+config_file: |
+  backend: vllm
+  function:
+    disable_no_action: true
+    grammar:
+      disable: true
+      parallel_calls: true
+      expect_strings_after_json: true
+  template:
+    use_tokenizer_template: true
+  # Uncomment to specify a quantization method (optional)
+  # quantization: "awq"
+  # Uncomment to limit the GPU memory utilization (vLLM default is 0.9 for 90%)
+  # gpu_memory_utilization: 0.5
+  # Uncomment to trust remote code from huggingface
+  # trust_remote_code: true
+  # Uncomment to enable eager execution
+  # enforce_eager: true
+  # Uncomment to specify the size of the CPU swap space per GPU (in GiB)
+  # swap_space: 2
+  # Uncomment to specify the maximum length of a sequence (including prompt and output)
+  # max_model_len: 32768
+  # Uncomment and specify the number of Tensor divisions.
+  # Allows you to partition and run large models. Performance gains are limited.
+  # https://github.com/vllm-project/vllm/issues/1435
+  # tensor_parallel_size: 2
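Note (not part of the diff): the sketch below shows how the commented-out vLLM options from the gallery files above might be set in a local model config once an entry such as hermes-3-llama-3.1-8b:vllm is installed. The file name and the chosen values are illustrative assumptions, not defaults shipped by this change; only the option names themselves come from the diff.

# Hypothetical models/hermes-3-vllm.yaml -- example values, not defaults from this PR
name: hermes-3-llama-3.1-8b-vllm
backend: vllm
parameters:
  model: NousResearch/Hermes-3-Llama-3.1-8B   # weights fetched from Hugging Face by the vLLM backend
context_size: 8192
# vLLM tuning knobs mirroring the commented-out options in gallery/vllm.yaml:
gpu_memory_utilization: 0.95   # let vLLM use up to 95% of GPU memory (its default is 0.9)
max_model_len: 16384           # cap prompt + output length to fit a smaller GPU
# tensor_parallel_size: 2      # uncomment to shard the model across two GPUs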