21 changes: 15 additions & 6 deletions Dockerfile
@@ -19,20 +19,29 @@ FROM quay.io/ascend/cann:8.3.rc2-910b-ubuntu22.04-py3.11

ARG PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
ARG COMPILE_CUSTOM_KERNELS=1
ARG MOONCAKE_TAG="v0.3.7.post2"

# Define environments
ENV DEBIAN_FRONTEND=noninteractive
ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS}

RUN apt-get update -y && \
apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev && \
rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/*

WORKDIR /workspace

COPY . /vllm-workspace/vllm-ascend/

# Install Mooncake dependencies
RUN apt-get update -y && \
apt-get install -y git vim wget net-tools gcc g++ cmake libnuma-dev && \
git clone --depth 1 --branch ${MOONCAKE_TAG} https://github.com/kvcache-ai/Mooncake /vllm-workspace/Mooncake && \
cp /vllm-workspace/vllm-ascend/tools/mooncake_installer.sh /vllm-workspace/Mooncake/ && \
cd /vllm-workspace/Mooncake && bash mooncake_installer.sh -y && \
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/lib64 && \
mkdir -p build && cd build && cmake .. -DUSE_ASCEND_DIRECT=ON && \
make -j$(nproc) && make install && \
rm -fr /vllm-workspace/Mooncake/build && \
rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/*

RUN pip config set global.index-url ${PIP_INDEX_URL}

# Install vLLM
@@ -54,7 +63,7 @@ RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
python3 -m pip cache purge

# Install modelscope (for fast download) and ray (for multinode)
RUN python3 -m pip install modelscope 'ray>=2.47.1' 'protobuf>3.20.0' && \
RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \
python3 -m pip cache purge

CMD ["/bin/bash"]
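
For reference, a minimal build invocation exercising the new MOONCAKE_TAG build argument could look like the following sketch (the image name vllm-ascend:mooncake is a placeholder, not part of this change):

    docker build -f Dockerfile \
        --build-arg MOONCAKE_TAG=v0.3.7.post2 \
        --build-arg COMPILE_CUSTOM_KERNELS=1 \
        -t vllm-ascend:mooncake .

Leaving MOONCAKE_TAG unset falls back to the default v0.3.7.post2 declared above.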
24 changes: 16 additions & 8 deletions Dockerfile.a3
@@ -19,21 +19,29 @@ FROM quay.io/ascend/cann:8.3.rc2-a3-ubuntu22.04-py3.11

ARG PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
ARG COMPILE_CUSTOM_KERNELS=1
ARG MOONCAKE_TAG=v0.3.7.post2

COPY . /vllm-workspace/vllm-ascend/
# Define environments
ENV DEBIAN_FRONTEND=noninteractive
ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS}

RUN apt-get update -y && \
apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev && \
rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/*
RUN pip config set global.index-url ${PIP_INDEX_URL}

WORKDIR /workspace

COPY . /vllm-workspace/vllm-ascend/

RUN pip config set global.index-url ${PIP_INDEX_URL}
# Install Mooncake dependencies
RUN apt-get update -y && \
apt-get install -y git vim wget net-tools gcc g++ cmake libnuma-dev && \
git clone --depth 1 --branch ${MOONCAKE_TAG} https://github.com/kvcache-ai/Mooncake /vllm-workspace/Mooncake && \
cp /vllm-workspace/vllm-ascend/tools/mooncake_installer.sh /vllm-workspace/Mooncake/ && \
cd /vllm-workspace/Mooncake && bash mooncake_installer.sh -y && \
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/lib64 && \
mkdir -p build && cd build && cmake .. -DUSE_ASCEND_DIRECT=ON && \
make -j$(nproc) && make install && \
rm -fr /vllm-workspace/Mooncake/build && \
rm -rf /var/cache/apt/* && \
rm -rf /var/lib/apt/lists/*

# Install vLLM
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
@@ -54,7 +62,7 @@ RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
python3 -m pip cache purge

# Install modelscope (for fast download) and ray (for multinode)
RUN python3 -m pip install modelscope 'ray>=2.47.1' 'protobuf>3.20.0' && \
RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \
python3 -m pip cache purge

CMD ["/bin/bash"]
25 changes: 19 additions & 6 deletions Dockerfile.a3.openEuler
@@ -19,23 +19,36 @@ FROM quay.io/ascend/cann:8.3.rc2-a3-openeuler24.03-py3.11

ARG PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
ARG COMPILE_CUSTOM_KERNELS=1
ARG MOONCAKE_TAG="v0.3.7.post2"

ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS}

RUN yum update -y && \
yum install -y python3-pip git vim wget net-tools gcc gcc-c++ make cmake numactl-devel && \
rm -rf /var/cache/yum

RUN pip config set global.index-url ${PIP_INDEX_URL}

WORKDIR /workspace

COPY . /vllm-workspace/vllm-ascend/

SHELL ["/bin/bash", "-c"]

RUN yum update -y && \
yum install -y git vim wget net-tools gcc gcc-c++ make cmake numactl-devel && \
git clone --depth 1 --branch ${MOONCAKE_TAG} https://github.com/kvcache-ai/Mooncake /vllm-workspace/Mooncake && \
cp /vllm-workspace/vllm-ascend/tools/mooncake_installer.sh /vllm-workspace/Mooncake/ && \
ARCH=$(uname -m) && \
source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/${ARCH}-linux/devlib:/usr/local/Ascend/ascend-toolkit/latest/${ARCH}-linux/lib64:$LD_LIBRARY_PATH && \
export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/c++/12:/usr/include/c++/12/${ARCH}-openEuler-linux && \
cd /vllm-workspace/Mooncake && \
bash mooncake_installer.sh -y && \
mkdir -p build && cd build && cmake .. -DUSE_ASCEND_DIRECT=ON && \
make -j$(nproc) && make install && \
rm -fr /vllm-workspace/Mooncake/build && \
rm -rf /var/cache/yum/*

# Install vLLM
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
ARG VLLM_TAG=v0.11.0

RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
# On x86, triton will be installed by vllm, but it doesn't work correctly on Ascend, so we need to uninstall it.
RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
@@ -52,7 +65,7 @@ RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
python3 -m pip cache purge

# Install modelscope (for fast download) and ray (for multinode)
RUN python3 -m pip install modelscope 'ray>=2.47.1' 'protobuf>3.20.0' && \
RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \
python3 -m pip cache purge

CMD ["/bin/bash"]
26 changes: 20 additions & 6 deletions Dockerfile.openEuler
@@ -19,23 +19,37 @@ FROM quay.io/ascend/cann:8.3.rc2-910b-openeuler24.03-py3.11

ARG PIP_INDEX_URL="https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
ARG COMPILE_CUSTOM_KERNELS=1
ARG MOONCAKE_TAG="v0.3.7.post2"

ENV COMPILE_CUSTOM_KERNELS=${COMPILE_CUSTOM_KERNELS}

RUN yum update -y && \
yum install -y python3-pip git vim wget net-tools gcc gcc-c++ make cmake numactl-devel && \
rm -rf /var/cache/yum

RUN pip config set global.index-url ${PIP_INDEX_URL}

WORKDIR /workspace

COPY . /vllm-workspace/vllm-ascend/

SHELL ["/bin/bash", "-c"]

RUN yum update -y && \
yum install -y git vim wget net-tools gcc gcc-c++ make cmake numactl-devel && \
git clone --depth 1 --branch ${MOONCAKE_TAG} https://github.com/kvcache-ai/Mooncake /vllm-workspace/Mooncake && \
cp /vllm-workspace/vllm-ascend/tools/mooncake_installer.sh /vllm-workspace/Mooncake/ && \
ARCH=$(uname -m) && \
source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/${ARCH}-linux/devlib:/usr/local/Ascend/ascend-toolkit/latest/${ARCH}-linux/lib64:$LD_LIBRARY_PATH && \
export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/include/c++/12:/usr/include/c++/12/${ARCH}-openEuler-linux && \
cd /vllm-workspace/Mooncake && \
bash mooncake_installer.sh -y && \
mkdir -p build && cd build && cmake .. -DUSE_ASCEND_DIRECT=ON && \
make -j$(nproc) && make install && \
rm -fr /vllm-workspace/Mooncake/build && \
rm -rf /var/cache/yum/*

# Install vLLM
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
ARG VLLM_TAG=v0.11.0

ARG VLLM_TAG=v0.11.2
RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
# On x86, triton will be installed by vllm, but it doesn't work correctly on Ascend, so we need to uninstall it.
RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
@@ -52,7 +66,7 @@ RUN export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
python3 -m pip cache purge

# Install modelscope (for fast download) and ray (for multinode)
RUN python3 -m pip install modelscope 'ray>=2.47.1' 'protobuf>3.20.0' && \
RUN python3 -m pip install modelscope 'ray>=2.47.1,<=2.48.0' 'protobuf>3.20.0' && \
python3 -m pip cache purge

CMD ["/bin/bash"]
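
As a quick sanity check for any of the four images, the ray pin introduced here can be verified inside a container (the image tag is a placeholder, as in the build sketch above):

    docker run --rm vllm-ascend:mooncake python3 -c "import ray; print(ray.__version__)"

The reported version should fall within the pinned range >=2.47.1,<=2.48.0.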