
Commit

Merge branch 'master' into feat-request-middleware
dave-gray101 authored Oct 16, 2024
2 parents 0776abe + a60b9b7 commit ed7f03e
Showing 19 changed files with 70 additions and 18 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
# llama.cpp versions
GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=755a9b2bf00fbae988e03a47e852b66eaddd113a
+CPPLLAMA_VERSION?=9e041024481f6b249ab8918e18b9477f873b5a5e

# go-rwkv version
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
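The change above bumps the pinned llama.cpp checkout (CPPLLAMA_VERSION) to commit 9e04102. Because GNU make lets command-line assignments override ?= defaults, a different llama.cpp revision can be tried without editing the Makefile. A minimal sketch, assuming the usual LocalAI build target:

    make CPPLLAMA_VERSION=9e041024481f6b249ab8918e18b9477f873b5a5e build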
2 changes: 1 addition & 1 deletion backend/python/autogptq/requirements.txt
@@ -1,6 +1,6 @@
accelerate
auto-gptq==0.7.1
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
transformers
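Most of the files below apply the same dependency bump, grpcio 1.66.2 to 1.67.0, across the Python backends. After a backend environment is rebuilt, the upgrade can be verified from inside that backend's virtualenv (the import name for the grpcio package is grpc):

    python -c "import grpc; print(grpc.__version__)"   # expected: 1.67.0
    pip show grpcio                                    # alternative check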
2 changes: 1 addition & 1 deletion backend/python/bark/requirements.txt
@@ -1,4 +1,4 @@
bark==0.1.5
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/common/template/requirements.txt
@@ -1,2 +1,2 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
2 changes: 1 addition & 1 deletion backend/python/coqui/requirements.txt
@@ -1,4 +1,4 @@
coqui-tts
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/diffusers/requirements.txt
@@ -1,5 +1,5 @@
setuptools
-grpcio==1.66.2
+grpcio==1.67.0
pillow
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/exllama2/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
wheel
2 changes: 1 addition & 1 deletion backend/python/mamba/requirements.txt
@@ -1,3 +1,3 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/openvoice/requirements-intel.txt
@@ -2,7 +2,7 @@
intel-extension-for-pytorch
torch
optimum[openvino]
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
librosa==0.9.1
faster-whisper==1.0.3
2 changes: 1 addition & 1 deletion backend/python/openvoice/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
librosa
faster-whisper
2 changes: 1 addition & 1 deletion backend/python/parler-tts/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
llvmlite==0.43.0
2 changes: 1 addition & 1 deletion backend/python/rerankers/requirements.txt
@@ -1,3 +1,3 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/sentencetransformers/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
datasets
2 changes: 1 addition & 1 deletion backend/python/transformers-musicgen/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
scipy==1.14.0
certifi
2 changes: 1 addition & 1 deletion backend/python/transformers/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406
2 changes: 1 addition & 1 deletion backend/python/vall-e-x/requirements.txt
@@ -1,3 +1,3 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
2 changes: 1 addition & 1 deletion backend/python/vllm/install.sh
@@ -22,7 +22,7 @@ if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
git clone https://github.com/vllm-project/vllm
fi
pushd vllm
-uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.66.2 protobuf bitsandbytes
+uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.67.0 protobuf bitsandbytes
uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu
VLLM_TARGET_DEVICE=cpu python setup.py install
popd
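Note that the changed uv pip install line only runs on the build-from-source path, which the script guards with BUILD_TYPE being empty and FROM_SOURCE=true (see the condition in the hunk context above). A hypothetical direct invocation under those assumptions; in practice this script is normally driven by the backend's install tooling rather than called by hand:

    BUILD_TYPE="" FROM_SOURCE=true bash backend/python/vllm/install.sh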
2 changes: 1 addition & 1 deletion backend/python/vllm/requirements.txt
@@ -1,4 +1,4 @@
-grpcio==1.66.2
+grpcio==1.67.0
protobuf
certifi
setuptools
52 changes: 52 additions & 0 deletions gallery/index.yaml
@@ -572,6 +572,26 @@
    - filename: EdgeRunner-Command-Nested.i1-Q4_K_M.gguf
      sha256: a1cc4d2b601dc20e58cbb549bd3e9bc460995840c0aaf1cd3c1cb5414c900ac7
      uri: huggingface://mradermacher/EdgeRunner-Command-Nested-i1-GGUF/EdgeRunner-Command-Nested.i1-Q4_K_M.gguf
- !!merge <<: *qwen25
  name: "tsunami-0.5x-7b-instruct-i1"
  icon: https://huggingface.co/Tsunami-th/Tsunami-0.5x-7B-Instruct/resolve/main/Tsunami.webp
  urls:
    - https://huggingface.co/Tsunami-th/Tsunami-0.5x-7B-Instruct
    - https://huggingface.co/mradermacher/Tsunami-0.5x-7B-Instruct-i1-GGUF
  description: |
    TSUNAMI: Transformative Semantic Understanding and Natural Augmentation Model for Intelligence.
    The full TSUNAMI name was created by ChatGPT.
    Information
    Tsunami-0.5x-7B-Instruct is a Thai large language model fine-tuned from Qwen2.5-7B on a Thai dataset of around 100,000 rows.
  overrides:
    parameters:
      model: Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf
  files:
    - filename: Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf
      sha256: 22e2003ecec7f1e91f2e9aaec334613c0f37fb3000d0e628b5a9980e53322fa7
      uri: huggingface://mradermacher/Tsunami-0.5x-7B-Instruct-i1-GGUF/Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf
- &archfunct
  license: apache-2.0
  tags:
@@ -1620,6 +1640,38 @@
    - filename: Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf
      sha256: 282456efcb6c7e54d34ac25ae7fc022a94152ed77281ae4625b9628091e0a3d6
      uri: huggingface://mradermacher/Doctoraifinetune-3.1-8B-i1-GGUF/Doctoraifinetune-3.1-8B.i1-Q4_K_M.gguf
- !!merge <<: *llama31
  name: "astral-fusion-neural-happy-l3.1-8b"
  urls:
    - https://huggingface.co/ZeroXClem/Astral-Fusion-Neural-Happy-L3.1-8B
    - https://huggingface.co/mradermacher/Astral-Fusion-Neural-Happy-L3.1-8B-GGUF
  description: |
    Astral-Fusion-Neural-Happy-L3.1-8B is a celestial blend of magic, creativity, and dynamic storytelling. Designed to excel in instruction-following, immersive roleplaying, and magical narrative generation, this model is a fusion of the finest qualities from Astral-Fusion, NIHAPPY, and NeuralMahou. ✨🚀
    This model is perfect for anyone seeking a cosmic narrative experience, with the ability to generate both precise instructional content and fantastical stories in one cohesive framework. Whether you're crafting immersive stories, creating AI roleplaying characters, or working on interactive storytelling, this model brings out the magic. 🌟
  overrides:
    parameters:
      model: Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf
  files:
    - filename: Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf
      sha256: 14a3b07c1723ef1ca24f99382254b1227d95974541e23792a4e7ff621896055d
      uri: huggingface://mradermacher/Astral-Fusion-Neural-Happy-L3.1-8B-GGUF/Astral-Fusion-Neural-Happy-L3.1-8B.Q4_K_M.gguf
- !!merge <<: *llama31
  name: "mahou-1.5-llama3.1-70b-i1"
  url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
  icon: https://huggingface.co/flammenai/Mahou-1.0-mistral-7B/resolve/main/mahou1.png
  urls:
    - https://huggingface.co/flammenai/Mahou-1.5-llama3.1-70B
    - https://huggingface.co/mradermacher/Mahou-1.5-llama3.1-70B-i1-GGUF
  description: |
    Mahou is designed to provide short messages in a conversational context. It is capable of casual conversation and character roleplay.
  overrides:
    parameters:
      model: Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf
  files:
    - filename: Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf
      sha256: c2711c4c9c8d011edbeaa391b4418d433e273a318d1de3dbdda9b85baf4996f2
      uri: huggingface://mradermacher/Mahou-1.5-llama3.1-70B-i1-GGUF/Mahou-1.5-llama3.1-70B.i1-Q4_K_M.gguf
- &deepseek
  ## Deepseek
  url: "github:mudler/LocalAI/gallery/deepseek.yaml@master"
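Each new gallery entry above records a sha256 for its GGUF file, so a downloaded artifact can be checked against the index before use. A quick verification with standard coreutils, using the Tsunami entry's values from above as an example:

    sha256sum Tsunami-0.5x-7B-Instruct.i1-Q4_K_M.gguf
    # expected: 22e2003ecec7f1e91f2e9aaec334613c0f37fb3000d0e628b5a9980e53322fa7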
