-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Initial commit of support for dockerizing lollms-webui
- Loading branch information
1 parent
8104bf1
commit 166a2ac
Showing
90 changed files
with
13,619 additions
and
1 deletion.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
MODEL_DIR=/media/teamgroup/models | ||
MODEL=openorca-preview1-200k-llama-13b.ggmlv3.q4_K_M.bin |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,77 @@ | ||
# syntax=docker/dockerfile:1

# ---------- Build stage: compile GPU-enabled Python packages on the CUDA devel image ----------
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 AS builder

# Reuse the conda tree from the official miniconda image instead of installing it by hand.
COPY --from=continuumio/miniconda3:4.12.0 /opt/conda /opt/conda

ENV PATH=/opt/conda/bin:$PATH

# Toolchain + OpenCL headers needed to compile bitsandbytes / llama-cpp-python,
# and the NVIDIA OpenCL ICD registration so OpenCL apps can find the driver.
# Cleanup happens in the same layer so the apt lists never bloat the image
# (the previous version left them behind and also ran a blanket `apt-get upgrade`;
# prefer bumping the base-image tag instead).
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        clinfo \
        git \
        ocl-icd-opencl-dev \
        opencl-headers \
    && rm -rf /var/lib/apt/lists/* \
    && mkdir -p /etc/OpenCL/vendors \
    && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd

# Dedicated env isolates the webui's dependencies from base conda.
RUN conda create -y -n lollms python=3.10

# Run every following RUN inside the lollms env.
SHELL ["conda", "run", "-n", "lollms", "/bin/bash", "-c"]

# CUDA 11.8 builds of torch; --no-cache-dir keeps the multi-GB pip cache out of the layer.
RUN pip3 install --no-cache-dir torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

# bitsandbytes is built from source so the cuda11x kernels are compiled for CUDA 11.8.
RUN git clone https://github.com/TimDettmers/bitsandbytes.git --branch 0.40.0 \
    && cd bitsandbytes \
    && CUDA_VERSION=118 make cuda11x \
    && python3 setup.py install

# Replace any CPU-only llama-cpp-python wheel with a cuBLAS-enabled source build.
RUN pip3 uninstall -y llama-cpp-python \
    && CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip3 install --no-cache-dir llama-cpp-python==0.1.72

RUN git clone https://github.com/ParisNeo/lollms-webui.git --branch v3.0

WORKDIR /lollms-webui

RUN pip3 install --no-cache-dir -r requirements.txt

# Pin the personality/binding zoos to known-good commits for reproducible builds.
RUN git clone https://github.com/ParisNeo/lollms_personalities_zoo.git personalities_zoo \
    && cd personalities_zoo && git checkout 643cc32d2585465ca74732f956f80dafa4c82fce

RUN git clone https://github.com/ParisNeo/lollms_bindings_zoo.git bindings_zoo \
    && cd bindings_zoo && git checkout ba6bc56520c9b24b77e0555c919de22d5e9dedbd

# Install each binding's own requirements file.
RUN bash -c 'for i in bindings_zoo/*/requirements.txt ; do pip3 install --no-cache-dir -r "$i" ; done'

# Rebuild ctransformers from source with cuBLAS support enabled.
RUN CT_CUBLAS=1 pip3 install --no-cache-dir ctransformers --no-binary ctransformers

# Drop conda package caches before the env is copied into the runtime stage.
RUN conda clean -afy

# ---------- Runtime stage: slim CUDA runtime image, artifacts copied from builder ----------
FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04

COPY --from=builder /opt/conda /opt/conda
# CUDA headers kept for bindings that compile extensions at runtime.
COPY --from=builder /usr/local/cuda-11.8/targets/x86_64-linux/include /usr/local/cuda-11.8/targets/x86_64-linux/include

ENV PATH=/opt/conda/bin:$PATH

COPY --from=builder /lollms-webui /lollms-webui

# git/build-essential remain available so bindings can fetch and compile extras
# at runtime; caches are purged in the same layer.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        git \
        python3 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
    && mkdir -p /etc/OpenCL/vendors

COPY --from=builder /etc/OpenCL/vendors/nvidia.icd /etc/OpenCL/vendors/nvidia.icd

WORKDIR /lollms-webui

# One host-mounted /models directory; every binding's model subdirectory is a
# symlink into it so a single volume serves all bindings.
RUN mkdir /models \
    && mkdir models \
    && for dir in py_llama_cpp c_transformers llama_cpp_official binding_template gpt_j_m gpt_4all open_ai gpt_j_a gptq hugging_face; do \
        ln -s /models "models/$dir"; \
    done

COPY ./global_paths_cfg.yaml .

# Documentation only: the app listens on 9600 (publish with -p 9600:9600).
EXPOSE 9600

# Convenience for interactive `docker exec` shells.
RUN echo "source activate lollms" >> ~/.bashrc

# conda run is the fixed entrypoint; CMD supplies the default app invocation and
# can be overridden at `docker run` time (e.g. to pass --host/--port/--db_path).
ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "lollms"]
CMD ["python3", "app.py"]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,36 @@ | ||
# lollms-webui-docker | ||
A Dockerfile and setup to run lollms-webui in a containerized environment | ||
|
||
Docker images and configuration to run lollms-webui with GPU, currently updated to release v3.0 found here: https://github.com/ParisNeo/lollms-webui/releases/tag/v3.0 | ||
|
||
# Build instructions | ||
|
||
First, clone this repository
|
||
```sh | ||
git clone https://github.com/noneabove1182/lollms-webui-docker.git | ||
``` | ||
|
||
Next, build the image | ||
|
||
```sh | ||
cd lollms-webui-docker | ||
docker build -t lollms-webui-docker:latest . | ||
``` | ||
|
||
Now run the image | ||
|
||
```sh | ||
docker run --gpus all -p 9600:9600 -v /models:/models -v ./help:/lollms-webui/help -v ./data:/lollms-webui/data -v ./data/.parisneo:/root/.parisneo/ -v ./configs:/lollms-webui/configs -v ./web:/lollms-webui/web lollms-webui-docker:latest python3 app.py --host 0.0.0.0 --port 9600 --db_path data/database.db
``` | ||
|
||
# Running pre-built image | ||
|
||
Pre-built images are provided at https://hub.docker.com/r/noneabove1182/lollms
|
||
Follow the same command as above except with noneabove1182/lollms:(version)
|
||
# Running with docker compose | ||
|
||
A docker-compose.yaml file has been provided, as well as a .env file that I use for setting my model dir and the model name I'd like to load in with | ||
|
||
Feel free to modify both to fit your needs — for example, changing the port mapping, memory limit, or the model you want loaded.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
# =================== Lord Of Large Language Models Configuration file ===========================
version: 7
binding_name: c_transformers
model_name: CHANGEME  # set to the model file name installed for the selected binding

# Host information
host: localhost
port: 9600

# Generation parameters
seed: -1            # presumably -1 selects a non-fixed seed — confirm in binding docs
n_predict: 1024     # maximum tokens to generate per response
ctx_size: 2048      # context window size, in tokens
temperature: 0.9
top_k: 50
top_p: 0.95
repeat_last_n: 40
repeat_penalty: 1.2

n_threads: 8        # CPU threads used for inference

# Personality parameters
personalities: ["english/generic/lollms"]
active_personality_id: 0
override_personality_model_parameters: false # if true the personality parameters are overridden by those of the configuration (may affect personality behaviour)

user_name: user

# UI parameters
debug: False
db_path: database.db
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,37 @@ | ||
version: '3.9'

services:
  webui:
    volumes:
      # MODEL_DIR is read from the adjacent .env file.
      - '${MODEL_DIR}:/lollms-webui/models'
      - ./help:/lollms-webui/help
      - ./data:/lollms-webui/data
      - ./data/.parisneo:/root/.parisneo/
      - ./configs:/lollms-webui/configs
      - ./web:/lollms-webui/web
    ports:
      # Host 9600 -> container 9600 (matches --port in command below).
      - "9600:9600"
    image: 'noneabove1182/lollms:latest'
    ulimits:
      # Unlimited locked memory — presumably so large model files can be
      # mmap'd/mlock'd; confirm against the binding's requirements.
      memlock: -1
    mem_limit: 50gb
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU (requires the NVIDIA container toolkit).
            - driver: nvidia
              count: 1
              capabilities: [ gpu ]
    # Overrides the image's default CMD; the image ENTRYPOINT wraps this
    # in `conda run -n lollms`.
    command:
      [
        "python3",
        "app.py",
        "--host",
        "0.0.0.0",
        "--port",
        "9600",
        "--db_path",
        "data/database.db"
      ]

# Named volume declared but not referenced by the service above — TODO confirm
# whether it is still needed.
volumes:
  lollms:
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
lollms_path: /opt/conda/envs/lollms/lib/python3.10/site-packages/lollms | ||
lollms_personal_path: /lollms-webui |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,10 @@ | ||
question,answer | ||
What is LoLLMs WebUI?,LoLLMs WebUI is a user-friendly interface that provides access to various Large Language Model (LLM) models for a wide range of tasks. | ||
What are the features of LoLLMs WebUI?,The features of LoLLMs WebUI include:<br>- Choosing preferred binding, model, and personality<br>- Enhancing emails, essays, code debugging, thought organization, and more<br>- Exploring functionalities like searching, data organization, and image generation<br>- Easy-to-use UI with light and dark mode options<br>- Integration with GitHub repository<br>- Support for different personalities with predefined welcome messages<br>- Thumb up/down rating for generated answers<br>- Copy, edit, and remove messages<br>- Local database storage for discussions<br>- Search, export, and delete multiple discussions<br>- Support for Docker, conda, and manual virtual environment setups | ||
Where can I find a tutorial on how to use the tool?,You can find a tutorial on how to use the tool on our YouTube channel. Click here to watch the tutorial: <a href="https://youtu.be/ds_U0TDzbzI">YouTube Tutorial</a> | ||
What are the prerequisites for installing LoLLMs WebUI?,The prerequisites for installing LoLLMs WebUI are:<br>- Python 3.10 or higher<br>- Git (for cloning the repository)<br>Make sure Python is in your system's PATH and verify the Python version by running 'python --version' in the terminal. | ||
How do I install LoLLMs WebUI?,There are different installation methods for LoLLMs WebUI:<br>1. Easy install:<br>- Download the appropriate launcher for your platform (webui.bat for Windows, webui.sh for Linux).<br>- Place the launcher in a folder of your choice.<br>- Run the launcher script.<br>2. Using Conda:<br>- Clone the project or download the zip file from the GitHub repository.<br>- Create a new conda environment, activate it, and install the requirements.<br>3. Using Docker:<br>- Refer to the Docker documentation for installation instructions specific to your operating system. | ||
How do I launch the app?,You can launch the app by running the webui.sh or webui.bat launcher. Alternatively, you can activate the virtual environment and launch the application using 'python app.py' from the project's root directory. | ||
How do I select a model and binding?,To select a model and binding:<br>- Open the LoLLMs WebUI and go to the Settings page.<br>- In the Models Zoo tab, choose a binding from the list.<br>- Wait for the installation process to finish and click the Install button next to the desired model.<br>- After the model installation is complete, select the model and apply the changes.<br>- Don't forget to save the configuration. | ||
How do I start a discussion?,To start a discussion:<br>- Go to the Discussions view.<br>- Click the + button to create a new discussion.<br>- You will see a predefined welcome message based on the selected personality.<br>- Ask a question or provide an initial prompt to start the discussion.<br>- You can stop the generation process at any time by pressing the Stop Generating button. | ||
How do I manage discussions?,To manage discussions:<br>- To edit a discussion title, simply type a new title or modify the existing one.<br>- To delete a discussion, click the Delete button.<br>- To search for specific discussions, use the search button and enter relevant keywords.<br>- To perform batch operations (exporting |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,4 @@ | ||
#!/bin/bash
# Entrypoint helper: start lollms-webui inside the "lollms" conda env,
# forwarding all CLI arguments (e.g. --host/--port/--db_path) to app.py.
#
# `exec` replaces this shell with the conda/python process so the app runs
# as the container's main process and receives SIGTERM directly from
# `docker stop` instead of being orphaned behind a lingering shell.
exec conda run --no-capture-output -n lollms python app.py "$@"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,4 @@ | ||
VITE_GPT4ALL_API = http://localhost:9600 # http://localhost:9600 | ||
VITE_GPT4ALL_API_CHANGE_ORIGIN = 0 # FALSE | ||
VITE_GPT4ALL_API_SECURE = 0 # FALSE | ||
VITE_GPT4ALL_API_BASEURL = / |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,14 @@ | ||
/* eslint-env node */
// ESLint configuration for the Vue 3 frontend: Vue "essential" rules plus
// eslint:recommended, with stylistic/formatting rules disabled in favor of
// Prettier (skip-formatting preset).
require('@rushstack/eslint-patch/modern-module-resolution')

module.exports = {
  root: true,
  'extends': [
    'plugin:vue/vue3-essential',
    'eslint:recommended',
    '@vue/eslint-config-prettier/skip-formatting'
  ],
  parserOptions: {
    ecmaVersion: 'latest'
  }
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
# Logs | ||
logs | ||
*.log | ||
npm-debug.log* | ||
yarn-debug.log* | ||
yarn-error.log* | ||
pnpm-debug.log* | ||
lerna-debug.log* | ||
|
||
node_modules | ||
.DS_Store | ||
dist-ssr | ||
coverage | ||
*.local | ||
|
||
/cypress/videos/ | ||
/cypress/screenshots/ | ||
|
||
# Editor directories and files | ||
.vscode | ||
!.vscode/extensions.json | ||
.idea | ||
*.suo | ||
*.ntvs* | ||
*.njsproj | ||
*.sln | ||
*.sw? | ||
|
||
# REST Client files (VSCODE extension for making GET POST requests easy and fst from text files) | ||
*.http |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
{ | ||
"$schema": "https://json.schemastore.org/prettierrc", | ||
"semi": false, | ||
"tabWidth": 2, | ||
"singleQuote": true, | ||
"printWidth": 100, | ||
"trailingComma": "none" | ||
} |
Oops, something went wrong.