diff --git a/docker/jetson/Dockerfile b/docker/jetson/Dockerfile
new file mode 100644
index 0000000000..17a59bbd03
--- /dev/null
+++ b/docker/jetson/Dockerfile
@@ -0,0 +1,52 @@
+# NOTE: a trailing "# comment" on an ARG/ENV line is NOT a comment in a
+# Dockerfile -- it becomes part of the value and would corrupt the FROM tag
+# below, so version notes must live on their own line.
+# 36.2.0 == Jetpack 6.0 DP; see docker/jetson/README.md for the version table.
+ARG JETPACK_VERSION=36.2.0
+
+FROM nvcr.io/nvidia/l4t-jetpack:r$JETPACK_VERSION
+
+# ARGs declared before FROM are out of scope inside the stage; redeclare them.
+ARG JETPACK_VERSION=36.2.0
+ARG HTTP_PROXY=""
+ARG HTTPS_PROXY=""
+# PyPI mirror used below; override for builds outside mainland China.
+ARG PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple
+
+ENV JETPACK_VERSION=$JETPACK_VERSION
+
+# System and build dependencies. apt lists are removed in the same layer so
+# they never persist in the image.
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        gdb \
+        git \
+        libgoogle-glog-dev \
+        libopenblas-dev \
+        libopenmpi-dev \
+        python3-pip \
+        rapidjson-dev && \
+    rm -rf /var/lib/apt/lists/* && \
+    pip3 config set global.index-url $PIP_INDEX_URL && \
+    python3 -m pip install --no-cache-dir --upgrade pip setuptools==69.5.1 && \
+    python3 -m pip install --no-cache-dir cmake packaging wheel
+
+COPY . /workspace/lmdeploy
+
+# WORKDIR creates the directory if needed; no explicit mkdir required.
+WORKDIR /workspace/lmdeploy
+
+# Download and install the Jetpack-matched NVIDIA PyTorch wheel
+# (see docker/jetson/install_pytorch.sh for the version mapping).
+RUN mkdir -p /workspace/lmdeploy/torch-jetson && \
+    chmod +x docker/jetson/install_pytorch.sh && \
+    docker/jetson/install_pytorch.sh $JETPACK_VERSION
+
+RUN python3 -m pip install --no-cache-dir -r requirements/build.txt
+
+# Build-time proxy; cleared at the end of the file so it does not leak into
+# the runtime environment.
+ENV http_proxy=$HTTP_PROXY
+ENV https_proxy=$HTTPS_PROXY
+
+# Build the C++/CUDA backend, install it, then install the python package.
+RUN mkdir -p build && cd build && \
+    cmake .. \
+        -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+        -DCMAKE_EXPORT_COMPILE_COMMANDS=1 \
+        -DCMAKE_INSTALL_PREFIX=/workspace/lmdeploy/install \
+        -DBUILD_PY_FFI=ON \
+        -DBUILD_MULTI_GPU=OFF \
+        -DBUILD_CUTLASS_MOE=OFF \
+        -DBUILD_CUTLASS_MIXED_GEMM=OFF \
+        -DCMAKE_CUDA_FLAGS="-lineinfo" \
+        -DUSE_NVTX=ON && \
+    make -j$(nproc) && make install && \
+    cd .. && \
+    python3 -m pip install --no-cache-dir -e . && \
+    rm -rf build
+
+ENV LD_LIBRARY_PATH=/workspace/lmdeploy/install/lib:$LD_LIBRARY_PATH
+# # explicitly set ptxas path for triton
+# ENV TRITON_PTXAS_PATH=/usr/local/cuda/bin/ptxas
+ENV http_proxy=""
+ENV https_proxy=""
diff --git a/docker/jetson/README.md b/docker/jetson/README.md
new file mode 100644
index 0000000000..a09487cb66
--- /dev/null
+++ b/docker/jetson/README.md
@@ -0,0 +1,22 @@
+# Jetson Support (beta)
+
+Build LMDeploy for NVIDIA Jetson platforms with Docker.
+
+```sh
+export JETPACK_L4T="36.2.0" # Jetpack 6.0 DP
+
+cd lmdeploy
+docker build -t lmdeploy_jetson:r$JETPACK_L4T \
+ --build-arg JETPACK_VERSION=$JETPACK_L4T \
+ -f docker/jetson/Dockerfile .
+```
+
+Version correspondence table:
+
+| $JETPACK_L4T | Jetpack Version | Python Version | Torch Version | CUDA VERSION | Support Boards |
+| :----------: | :-------------: | :------------: | :-----------: | :----------: | :------------------------------------------------------------------------------------------------: |
+| 35.2.1 | 5.1 | Python 3.8 | 2.0.0 | 11.4 | AGX Orin NX 32GB,<br>Orin NX 16GB,<br>Xavier NX series,<br>AGX Xavier Series |
+| 35.3.1 | 5.1.1 | Python 3.8 | 2.0.0 | 11.4 | AGX Orin Series,<br>Orin NX Series,<br>Orin Nano Series,<br>Xavier NX Series,<br>AGX Xavier Series |
+| 35.4.1 | 5.1.2 | Python 3.8 | 2.1.0 | 11.4 | AGX Orin Series,<br>Orin NX Series,<br>Orin Nano Series,<br>Xavier NX Series,<br>AGX Xavier Series |
+| 36.2.0 | 6.0 DP | Python 3.10 | 2.2.0 | 12.2 | AGX Orin Series,<br>Orin NX Series,<br>Orin Nano Series |
+| 36.3.0 | 6.0 | Python 3.10 | 2.4.0 | 12.2 | AGX Orin Series,<br>Orin NX Series,<br>Orin Nano Series |
diff --git a/docker/jetson/install_pytorch.sh b/docker/jetson/install_pytorch.sh
new file mode 100755
index 0000000000..e366450358
--- /dev/null
+++ b/docker/jetson/install_pytorch.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+JETPACK_VERSION=$1
+
+echo $JETPACK_VERSION
+
+if [ "$JETPACK_VERSION" = "35.2.1" ] ; then # Jetpack 5.1
+ wget "https://developer.download.nvidia.cn/compute/redist/jp/v51/pytorch/torch-2.0.0a0+8aa34602.nv23.03-cp38-cp38-linux_aarch64.whl" -P /workspace/lmdeploy/torch-jetson
+ python3 -m pip install "/workspace/lmdeploy/torch-jetson/torch-2.0.0a0+8aa34602.nv23.03-cp38-cp38-linux_aarch64.whl"
+elif [ "$JETPACK_VERSION" = "35.3.1" ] ; then # Jetpack 5.1.1
+ wget "https://developer.download.nvidia.cn/compute/redist/jp/v511/pytorch/torch-2.0.0a0+fe05266f.nv23.04-cp38-cp38-linux_aarch64.whl" -P /workspace/lmdeploy/torch-jetson
+ python3 -m pip install "/workspace/lmdeploy/torch-jetson/torch-2.0.0a0+fe05266f.nv23.04-cp38-cp38-linux_aarch64.whl"
+elif [ "$JETPACK_VERSION" = "35.4.1" ] ; then # Jetpack 5.1.2
+ wget "https://developer.download.nvidia.cn/compute/redist/jp/v512/pytorch/torch-2.1.0a0+41361538.nv23.06-cp38-cp38-linux_aarch64.whl" -P /workspace/lmdeploy/torch-jetson
+ python3 -m pip install "/workspace/lmdeploy/torch-jetson/torch-2.1.0a0+41361538.nv23.06-cp38-cp38-linux_aarch64.whl"
+elif [ "$JETPACK_VERSION" = "36.2.0" ] ; then # Jetpack 6.0 DP
+ wget "https://developer.download.nvidia.cn/compute/redist/jp/v60dp/pytorch/torch-2.2.0a0+81ea7a4.nv24.01-cp310-cp310-linux_aarch64.whl" -P /workspace/lmdeploy/torch-jetson
+ python3 -m pip install "/workspace/lmdeploy/torch-jetson/torch-2.2.0a0+81ea7a4.nv24.01-cp310-cp310-linux_aarch64.whl"
+elif [ "$JETPACK_VERSION" = "36.3.0" ] ; then # Jetpack 6.0
+ wget https://developer.download.nvidia.cn/compute/redist/jp/v60/pytorch/torch-2.4.0a0+07cecf4168.nv24.05.14710581-cp310-cp310-linux_aarch64.whl -P /workspace/lmdeploy/torch-jetson
+ python3 -m pip install "/workspace/lmdeploy/torch-jetson/torch-2.4.0a0+07cecf4168.nv24.05.14710581-cp310-cp310-linux_aarch64.whl"
+else
+ echo "We currently do not support Jetpack v$JETPACK_VERSION. Please try 35.2.1, 35.3.1, 35.4.1, 36.2.0, or 36.3.0"
+ exit 1
+fi
diff --git a/requirements/jetson.txt b/requirements/jetson.txt
new file mode 100644
index 0000000000..911192aa0b
--- /dev/null
+++ b/requirements/jetson.txt
@@ -0,0 +1,18 @@
+# runtime.txt without torch, torchvision and triton
+accelerate>=0.29.3
+einops
+fastapi
+fire
+mmengine-lite
+numpy<2.0.0
+peft<=0.11.1
+pillow
+protobuf
+pydantic>2.0.0
+pynvml
+safetensors
+sentencepiece
+shortuuid
+tiktoken
+transformers
+uvicorn
diff --git a/setup.py b/setup.py
index cfa15e76ae..8084ccf024 100644
--- a/setup.py
+++ b/setup.py
@@ -128,8 +128,23 @@ def gen_packages_items():
return packages
+def check_jetson_platform():
+ """Assuming Linux/arch64 is the NVIDIA Jetson platform.
+
+ If support for other Linux/Arrch64 platforms is introduced in the future,
+ please make appropriate modifications.
+ """
+ import platform
+ current_system = platform.system().lower()
+ current_arch = platform.machine().lower()
+ suspected_jetson = current_system == 'linux' and current_arch == 'aarch64'
+ if suspected_jetson: return True
+ return False
+
+
if __name__ == '__main__':
lmdeploy_package_data = ['lmdeploy/bin/llama_gemm']
+ jetson_support = check_jetson_platform()
setup(
name='lmdeploy',
version=get_version(),
@@ -145,7 +160,8 @@ def gen_packages_items():
include_package_data=True,
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/test.txt'),
- install_requires=parse_requirements('requirements/runtime.txt'),
+ install_requires=parse_requirements('requirements/runtime.txt') if
+ not jetson_support else parse_requirements('requirements/jetson.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'lite': parse_requirements('requirements/lite.txt'),