diff --git a/.bazelrc b/.bazelrc
index 04ac0ba143..801b7193d4 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -36,9 +36,7 @@ build:cxx11_abi --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=1"
build:cxx11_abi --linkopt="-D_GLIBCXX_USE_CXX11_ABI=1"
build:cxx11_abi --define=abi=cxx11_abi
-build:pre_cxx11_abi --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
-build:pre_cxx11_abi --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
-build:pre_cxx11_abi --define=abi=pre_cxx11_abi
+build:jetpack --//toolchains/dep_collection:compute_libs=jetpack
build:ci_testing --define=torchtrt_src=prebuilt --cxxopt="-DDISABLE_TEST_IN_CI" --action_env "NVIDIA_TF32_OVERRIDE=0"
build:use_precompiled_torchtrt --define=torchtrt_src=prebuilt
diff --git a/.bazelversion b/.bazelversion
index b26a34e470..0e79152459 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-7.2.1
+8.1.1
diff --git a/.github/scripts/filter-matrix.py b/.github/scripts/filter-matrix.py
index 3710539f59..401e1cd68d 100644
--- a/.github/scripts/filter-matrix.py
+++ b/.github/scripts/filter-matrix.py
@@ -1,13 +1,15 @@
#!/usr/bin/env python3
+from typing import List
import argparse
import json
import sys
disabled_python_versions = "3.13"
+cpu_tag = "cpu"
-def main(args: list[str]) -> None:
+def main(args: List[str]) -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--matrix",
@@ -24,7 +26,12 @@ def main(args: list[str]) -> None:
includes = matrix_dict["include"]
filtered_includes = []
for item in includes:
- if item["python_version"] not in disabled_python_versions:
+ if all(
+ [
+ item["python_version"] not in disabled_python_versions,
+            item["desired_cuda"] != cpu_tag,
+ ]
+ ):
filtered_includes.append(item)
filtered_matrix_dict = {}
filtered_matrix_dict["include"] = filtered_includes
diff --git a/.github/scripts/generate_binary_build_matrix.py b/.github/scripts/generate_binary_build_matrix.py
index 52063ce9fc..a159a25f8d 100644
--- a/.github/scripts/generate_binary_build_matrix.py
+++ b/.github/scripts/generate_binary_build_matrix.py
@@ -2,6 +2,9 @@
"""Generates a matrix to be utilized through github actions
+Important. After making changes to this file please run following command:
+python -m tools.tests.test_generate_binary_build_matrix --update-reference-files
+
Will output a condensed version of the matrix if on a pull request that only
includes the latest version of python we support built on four different
architectures:
@@ -11,13 +14,13 @@
* Latest XPU
"""
-
import argparse
import json
import os
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
+
PYTHON_ARCHES_DICT = {
"nightly": ["3.9", "3.10", "3.11", "3.12"],
"test": ["3.9", "3.10", "3.11", "3.12"],
@@ -26,16 +29,30 @@
CUDA_ARCHES_DICT = {
"nightly": ["11.8", "12.6", "12.8"],
"test": ["11.8", "12.6", "12.8"],
- "release": ["11.8", "12.6", "12.8"],
+ "release": ["11.8", "12.4", "12.6"],
}
ROCM_ARCHES_DICT = {
- "nightly": ["6.1", "6.2"],
- "test": ["6.1", "6.2"],
- "release": ["6.1", "6.2"],
+ "nightly": ["6.2.4", "6.3"],
+ "test": ["6.2.4", "6.3"],
+ "release": ["6.1", "6.2.4"],
+}
+
+CUDA_CUDNN_VERSIONS = {
+ "11.8": {"cuda": "11.8.0", "cudnn": "9"},
+ "12.4": {"cuda": "12.4.1", "cudnn": "9"},
+ "12.6": {"cuda": "12.6.3", "cudnn": "9"},
+ "12.8": {"cuda": "12.8.0", "cudnn": "9"},
+}
+
+STABLE_CUDA_VERSIONS = {
+ "nightly": "12.6",
+ "test": "12.6",
+ "release": "12.4",
}
+CUDA_AARCH64_ARCHES = ["12.8-aarch64", "12.6-aarch64"]
+
PACKAGE_TYPES = ["wheel", "conda", "libtorch"]
-PRE_CXX11_ABI = "pre-cxx11"
CXX11_ABI = "cxx11-abi"
RELEASE = "release"
DEBUG = "debug"
@@ -59,7 +76,7 @@
CURRENT_NIGHTLY_VERSION = "2.8.0"
CURRENT_CANDIDATE_VERSION = "2.7.0"
-CURRENT_STABLE_VERSION = "2.7.0"
+CURRENT_STABLE_VERSION = "2.6.0"
CURRENT_VERSION = CURRENT_STABLE_VERSION
# By default use Nightly for CUDA arches
@@ -94,7 +111,7 @@ def arch_type(arch_version: str) -> str:
return ROCM
elif arch_version == CPU_AARCH64:
return CPU_AARCH64
- elif arch_version == CUDA_AARCH64:
+ elif arch_version in CUDA_AARCH64_ARCHES:
return CUDA_AARCH64
elif arch_version == XPU:
return XPU
@@ -140,11 +157,14 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
else:
PYTHON_ARCHES = PYTHON_ARCHES_DICT[channel]
WHEEL_CONTAINER_IMAGES = {
- "11.8": "pytorch/manylinux2_28-builder:cuda11.8",
- "12.1": "pytorch/manylinux2_28-builder:cuda12.1",
- "12.4": "pytorch/manylinux2_28-builder:cuda12.4",
- "12.6": "pytorch/manylinux2_28-builder:cuda12.6",
- "12.8": "pytorch/manylinux2_28-builder:cuda12.8",
+ **{
+ gpu_arch: f"pytorch/manylinux2_28-builder:cuda{gpu_arch}"
+ for gpu_arch in CUDA_ARCHES
+ },
+ **{
+ gpu_arch: f"pytorch/manylinuxaarch64-builder:cuda{gpu_arch.replace('-aarch64', '')}"
+ for gpu_arch in CUDA_AARCH64_ARCHES
+ },
**{
gpu_arch: f"pytorch/manylinux2_28-builder:rocm{gpu_arch}"
for gpu_arch in ROCM_ARCHES
@@ -153,26 +173,17 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
XPU: "pytorch/manylinux2_28-builder:xpu",
# TODO: Migrate CUDA_AARCH64 image to manylinux2_28_aarch64-builder:cuda12.4
CPU_AARCH64: "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64",
- CUDA_AARCH64: "pytorch/manylinuxaarch64-builder:cuda12.4",
+ CUDA_AARCH64: "pytorch/manylinuxaarch64-builder:cuda12.6",
}
LIBTORCH_CONTAINER_IMAGES = {
- **{
- (gpu_arch, PRE_CXX11_ABI): f"pytorch/manylinux2_28-builder:cuda{gpu_arch}"
- for gpu_arch in CUDA_ARCHES
- },
**{
(gpu_arch, CXX11_ABI): f"pytorch/libtorch-cxx11-builder:cuda{gpu_arch}"
for gpu_arch in CUDA_ARCHES
},
- **{
- (gpu_arch, PRE_CXX11_ABI): f"pytorch/manylinux2_28-builder:rocm{gpu_arch}"
- for gpu_arch in ROCM_ARCHES
- },
**{
(gpu_arch, CXX11_ABI): f"pytorch/libtorch-cxx11-builder:rocm{gpu_arch}"
for gpu_arch in ROCM_ARCHES
},
- (CPU, PRE_CXX11_ABI): "pytorch/manylinux2_28-builder:cpu",
(CPU, CXX11_ABI): "pytorch/libtorch-cxx11-builder:cpu",
}
@@ -181,7 +192,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
return {
CPU: "cpu",
CPU_AARCH64: CPU,
- CUDA_AARCH64: "cu124",
+ CUDA_AARCH64: f"cu{gpu_arch_version.replace('-aarch64', '').replace('.', '')}",
CUDA: f"cu{gpu_arch_version.replace('.', '')}",
ROCM: f"rocm{gpu_arch_version}",
XPU: "xpu",
@@ -272,7 +283,7 @@ def get_wheel_install_command(
return f"{WHL_INSTALL_BASE} {PACKAGES_TO_INSTALL_WHL} --index-url {get_base_download_url_for_repo('whl', channel, gpu_arch_type, desired_cuda)}_pypi_pkg" # noqa: E501
else:
raise ValueError(
- "Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6, 12.8 on Linux nightly builds." # noqa: E501
+ "Split build is not supported for this configuration. It is only supported for CUDA 11.8, 12.4, 12.6 on Linux nightly builds." # noqa: E501
)
if (
channel == RELEASE
@@ -343,7 +354,7 @@ def generate_libtorch_matrix(
if os == WINDOWS:
abi_versions = [RELEASE, DEBUG]
elif os == LINUX:
- abi_versions = [PRE_CXX11_ABI, CXX11_ABI]
+ abi_versions = [CXX11_ABI]
elif os in [MACOS_ARM64]:
abi_versions = [CXX11_ABI]
else:
@@ -422,11 +433,6 @@ def generate_wheels_matrix(
# Define default python version
python_versions = list(PYTHON_ARCHES)
- # If the list of python versions is set explicitly by the caller, stick with it instead
- # of trying to add more versions behind the scene
- if channel == NIGHTLY and (os in (LINUX, MACOS_ARM64, LINUX_AARCH64)):
- python_versions += ["3.13"]
-
if os == LINUX:
# NOTE: We only build manywheel packages for linux
package_type = "manywheel"
@@ -442,7 +448,11 @@ def generate_wheels_matrix(
if os == LINUX_AARCH64:
# Only want the one arch as the CPU type is different and
# uses different build/test scripts
- arches = [CPU_AARCH64, CUDA_AARCH64]
+ arches = []
+ if with_cpu == ENABLE:
+ arches += [CPU_AARCH64]
+        if with_cuda == ENABLE:
+ arches += CUDA_AARCH64_ARCHES
if with_cuda == ENABLE:
upload_to_base_bucket = "no"
@@ -463,15 +473,15 @@ def generate_wheels_matrix(
ret: List[Dict[str, Any]] = []
for python_version in python_versions:
for arch_version in arches:
- # TODO: Enable Python 3.13 support for ROCM
- if arch_version in ROCM_ARCHES and python_version == "3.13":
- continue
-
gpu_arch_type = arch_type(arch_version)
gpu_arch_version = (
"" if arch_version in [CPU, CPU_AARCH64, XPU] else arch_version
)
+ # TODO: Enable python 3.13t on cpu-s390x or Windows
+ if (gpu_arch_type == "cpu-s390x") and python_version == "3.13t":
+ continue
+
desired_cuda = translate_desired_cuda(gpu_arch_type, gpu_arch_version)
entry = {
"python_version": python_version,
diff --git a/.github/workflows/build-test-linux-aarch64.yml b/.github/workflows/build-test-linux-aarch64.yml
new file mode 100644
index 0000000000..dce15a5cc0
--- /dev/null
+++ b/.github/workflows/build-test-linux-aarch64.yml
@@ -0,0 +1,350 @@
+name: Build and test Linux aarch64 wheels
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+ - nightly
+ - release/*
+ tags:
+ # NOTE: Binary build pipelines should only get triggered on release candidate builds
+ # Release candidate tags look like: v1.11.0-rc1
+ - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
+ workflow_dispatch:
+
+jobs:
+ generate-matrix:
+ uses: ./.github/workflows/generate_binary_build_matrix.yml
+ with:
+ package-type: wheel
+ os: linux-aarch64
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ with-rocm: false
+ with-cpu: false
+ python-versions: '["3.11", "3.12", "3.10", "3.9"]'
+
+ filter-matrix:
+ needs: [generate-matrix]
+ outputs:
+ matrix: ${{ steps.generate.outputs.matrix }}
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+ - uses: actions/checkout@v4
+ with:
+ repository: pytorch/tensorrt
+ - name: Generate release matrix
+ id: generate
+ run: |
+ set -eou pipefail
+ MATRIX_BLOB=${{ toJSON(needs.generate-matrix.outputs.matrix) }}
+ MATRIX_BLOB="$(python3 .github/scripts/filter-matrix.py --matrix "${MATRIX_BLOB}")"
+ echo "${MATRIX_BLOB}"
+ echo "matrix=${MATRIX_BLOB}" >> "${GITHUB_OUTPUT}"
+
+ build:
+ needs: filter-matrix
+ permissions:
+ id-token: write
+ contents: read
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ pre-script: packaging/pre_build_script.sh
+ env-var-script: packaging/env_vars.txt
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ package-name: torch_tensorrt
+ name: Build torch-tensorrt whl package
+ uses: pytorch/test-infra/.github/workflows/build_wheels_linux.yml@main
+ with:
+ repository: ${{ matrix.repository }}
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ env-var-script: ${{ matrix.env-var-script }}
+ post-script: ${{ matrix.post-script }}
+ package-name: ${{ matrix.package-name }}
+ smoke-test-script: ${{ matrix.smoke-test-script }}
+ trigger-event: ${{ github.event_name }}
+ architecture: "aarch64"
+
+ tests-py-torchscript-fe:
+ name: Test torchscript frontend [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-torchscript-fe
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ export LD_LIBRARY_PATH=/usr/lib64:$LD_LIBRARY_PATH
+ pushd .
+ cd tests/modules
+ python hub.py
+ popd
+ pushd .
+ cd tests/py/ts
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_api_test_results.xml api/
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_models_test_results.xml models/
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/ts_integrations_test_results.xml integrations/
+ popd
+
+ tests-py-dynamo-converters:
+ name: Test dynamo converters [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-dynamo-converters
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml -n 4 conversion/
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml automatic_plugin/test_automatic_plugin.py
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dynamo_converters_test_results.xml automatic_plugin/test_automatic_plugin_with_attrs.py
+ popd
+
+ tests-py-dynamo-fe:
+ name: Test dynamo frontend [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-dynamo-fe
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/dyn_models_export.xml --ir dynamo models/
+ popd
+
+ tests-py-dynamo-serde:
+ name: Test dynamo export serde [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-dynamo-serde
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/export_serde_test_results.xml --ir dynamo models/test_export_serde.py
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/reexport_test_results.xml --ir dynamo models/test_reexport.py
+ popd
+
+ tests-py-torch-compile-be:
+ name: Test torch compile backend [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-torch-compile-be
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ python -m pytest -ra -n 10 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_be_test_results.xml backend/
+ python -m pytest -ra -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_complete_be_e2e_test_results.xml --ir torch_compile models/test_models.py
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/torch_compile_dyn_models_export.xml --ir torch_compile models/test_dyn_models.py
+ popd
+
+ tests-py-dynamo-core:
+ name: Test dynamo core [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-dynamo-core
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ python -m pytest -ra -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_test_results.xml --ignore runtime/test_002_cudagraphs_py.py --ignore runtime/test_002_cudagraphs_cpp.py runtime/
+ python -m pytest -ra -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_partitioning_test_results.xml partitioning/
+ python -m pytest -ra -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_lowering_test_results.xml lowering/
+ popd
+
+ tests-py-dynamo-cudagraphs:
+ name: Test dynamo cudagraphs [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-dynamo-cudagraphs
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py
+ python -m pip install -r requirements.txt
+ cd dynamo
+ nvidia-smi
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_cudagraphs_cpp_test_results.xml runtime/test_002_cudagraphs_cpp.py || true
+ python -m pytest -ra --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_dynamo_core_runtime_cudagraphs_py_test_results.xml runtime/test_002_cudagraphs_py.py || true
+ popd
+
+ tests-py-core:
+ name: Test core [Python]
+ needs: [filter-matrix, build]
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - repository: pytorch/tensorrt
+ package-name: torch_tensorrt
+ pre-script: packaging/pre_build_script.sh
+ post-script: packaging/post_build_script.sh
+ smoke-test-script: packaging/smoke_test_script.sh
+ uses: ./.github/workflows/linux-test.yml
+ with:
+ job-name: tests-py-core
+ repository: "pytorch/tensorrt"
+ ref: ""
+ test-infra-repository: pytorch/test-infra
+ test-infra-ref: main
+ build-matrix: ${{ needs.filter-matrix.outputs.matrix }}
+ pre-script: ${{ matrix.pre-script }}
+ architecture: "aarch64"
+ script: |
+ export USE_HOST_DEPS=1
+ export CI_BUILD=1
+ pushd .
+ cd tests/py/core
+ python -m pytest -ra -n 4 --junitxml=${RUNNER_TEST_RESULTS_DIR}/tests_py_core_test_results.xml .
+ popd
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}
+ cancel-in-progress: true
diff --git a/.github/workflows/build-test-linux.yml b/.github/workflows/build-test-linux-x86_64.yml
similarity index 99%
rename from .github/workflows/build-test-linux.yml
rename to .github/workflows/build-test-linux-x86_64.yml
index 024afd8c62..9951757ef3 100644
--- a/.github/workflows/build-test-linux.yml
+++ b/.github/workflows/build-test-linux-x86_64.yml
@@ -1,4 +1,4 @@
-name: Build and test Linux wheels
+name: Build and test Linux x86-64 wheels
on:
pull_request:
@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions/setup-python@v5
with:
- python-version: '3.11'
+ python-version: "3.11"
- uses: actions/checkout@v4
with:
repository: pytorch/tensorrt
diff --git a/.github/workflows/build-test-tensorrt-linux.yml b/.github/workflows/build-test-tensorrt-linux.yml
index 6ddd0e336d..2237ecb50d 100644
--- a/.github/workflows/build-test-tensorrt-linux.yml
+++ b/.github/workflows/build-test-tensorrt-linux.yml
@@ -3,7 +3,7 @@ name: Build and Test Torch-TensorRT on Linux with Future TensorRT Versions
on:
workflow_dispatch:
schedule:
- - cron: '0 0 * * 0' # Runs at 00:00 UTC every Sunday (minute hour day-of-month month-of-year day-of-week)
+ - cron: "0 0 * * 0" # Runs at 00:00 UTC every Sunday (minute hour day-of-month month-of-year day-of-week)
permissions:
id-token: write
@@ -30,7 +30,7 @@ jobs:
steps:
- uses: actions/setup-python@v5
with:
- python-version: '3.11'
+ python-version: "3.11"
- uses: actions/checkout@v4
with:
repository: pytorch/tensorrt
diff --git a/.github/workflows/generate_binary_build_matrix.yml b/.github/workflows/generate_binary_build_matrix.yml
index fceb8cf1ee..3352b6cb2d 100644
--- a/.github/workflows/generate_binary_build_matrix.yml
+++ b/.github/workflows/generate_binary_build_matrix.yml
@@ -42,6 +42,7 @@ on:
use-only-dl-pytorch-org:
description: "Use only download.pytorch.org when generating wheel install command?"
default: "false"
+ required: false
type: string
build-python-only:
description: "Generate binary build matrix for a python only package (i.e. only one python version)"
@@ -72,16 +73,16 @@ jobs:
steps:
- uses: actions/setup-python@v5
with:
- python-version: '3.11'
+ python-version: "3.11"
- name: Checkout test-infra repository
uses: actions/checkout@v4
with:
repository: ${{ inputs.test-infra-repository }}
ref: ${{ inputs.test-infra-ref }}
- - uses: ./.github/actions/set-channel
- uses: actions/checkout@v4
with:
repository: pytorch/tensorrt
+ - uses: pytorch/test-infra/.github/actions/set-channel@main
- name: Generate test matrix
id: generate
env:
@@ -109,4 +110,4 @@ jobs:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ inputs.package-type }}-${{ inputs.os }}-${{ inputs.test-infra-repository }}-${{ inputs.test-infra-ref }}
- cancel-in-progress: true
\ No newline at end of file
+ cancel-in-progress: true
diff --git a/.gitignore b/.gitignore
index 16e4f4f838..786b9542d6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,4 +74,5 @@ tests/py/dynamo/models/*.ts
tests/py/dynamo/models/*.ep
*.deb
*.tar.xz
-MODULE.bazel.lock
\ No newline at end of file
+MODULE.bazel.lock
+*.whl
diff --git a/MODULE.bazel b/MODULE.bazel
index 58de67800e..17f1c05bbc 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -1,13 +1,13 @@
module(
name = "torch_tensorrt",
- version = "2.8.0a0",
repo_name = "org_pytorch_tensorrt",
+    version = "2.8.0",
)
-bazel_dep(name = "googletest", version = "1.14.0")
-bazel_dep(name = "platforms", version = "0.0.10")
-bazel_dep(name = "rules_cc", version = "0.0.9")
-bazel_dep(name = "rules_python", version = "0.34.0")
+bazel_dep(name = "googletest", version = "1.16.0")
+bazel_dep(name = "platforms", version = "0.0.11")
+bazel_dep(name = "rules_cc", version = "0.1.1")
+bazel_dep(name = "rules_python", version = "1.3.0")
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
python.toolchain(
@@ -27,16 +27,26 @@ local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl"
# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
name = "torch_tensorrt",
- path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
+ path = "/opt/conda/lib/python3.10/site-packages/torch_tensorrt",
)
+
new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
# CUDA should be installed on the system locally
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
- path = "/usr/local/cuda-12.8/",
+ path = "/usr/local/cuda-12.8",
+)
+
+# Server Arm (SBSA) and Jetson Jetpack (L4T) use different versions of CUDA and TensorRT
+# These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
+
+new_local_repository(
+ name = "cuda_l4t",
+ build_file = "@//third_party/cuda:BUILD",
+ path = "/usr/local/cuda-12.6",
)
new_local_repository(
@@ -45,6 +55,7 @@ new_local_repository(
path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v12.8/",
)
+
http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
#############################################################################################################
@@ -59,17 +70,22 @@ http_archive(
)
http_archive(
- name = "libtorch_pre_cxx11_abi",
+ name = "libtorch_win",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-shared-with-deps-latest.zip"],
+ urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
)
+
+# It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
+# using the build flag `--//toolchains/dep_src:torch="whl"`
+
http_archive(
- name = "libtorch_win",
+ name = "torch_whl",
build_file = "@//third_party/libtorch:BUILD",
- strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/nightly/cu128/libtorch-win-shared-with-deps-latest.zip"],
+ strip_prefix = "torch",
+ type = "zip",
+ urls = ["https://download.pytorch.org/whl/nightly/cu128/torch-2.8.0.dev20250415%2Bcu128-cp310-cp310-manylinux_2_28_aarch64.whl"],
)
# Download these tarballs manually from the NVIDIA website
@@ -85,6 +101,24 @@ http_archive(
],
)
+http_archive(
+ name = "tensorrt_sbsa",
+ build_file = "@//third_party/tensorrt/archive:BUILD",
+ strip_prefix = "TensorRT-10.9.0.34",
+ urls = [
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.aarch64-gnu.cuda-12.8.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "tensorrt_l4t",
+ build_file = "@//third_party/tensorrt/archive:BUILD",
+ strip_prefix = "TensorRT-10.3.0.26",
+ urls = [
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
+ ],
+)
+
http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -94,6 +128,7 @@ http_archive(
],
)
+
####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################
@@ -105,17 +140,11 @@ http_archive(
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi
-#new_local_repository(
+# new_local_repository(
# name = "libtorch",
-# path = "/usr/local/lib/python3.6/dist-packages/torch",
-# build_file = "third_party/libtorch/BUILD"
-#)
-
-#new_local_repository(
-# name = "libtorch_pre_cxx11_abi",
-# path = "/usr/local/lib/python3.6/dist-packages/torch",
+# path = "/workspace/tensorrt/.venv/lib/python3.10/site-packages/torch",
# build_file = "third_party/libtorch/BUILD"
-#)
+# )
#new_local_repository(
# name = "tensorrt",
diff --git a/core/BUILD b/core/BUILD
index 4fd1a7cf64..28cd460690 100644
--- a/core/BUILD
+++ b/core/BUILD
@@ -4,16 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
config_setting(
- name = "python_core",
- values = {
- "define": "target_lang=python",
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -24,6 +37,13 @@ config_setting(
],
)
+config_setting(
+ name = "python_core",
+ values = {
+ "define": "target_lang=python",
+ },
+)
+
cc_library(
name = "core",
srcs = [
@@ -39,9 +59,14 @@ cc_library(
"//core/runtime",
"//core/util/logging",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/BUILD b/core/conversion/BUILD
index d00defcbd8..13696550e6 100644
--- a/core/conversion/BUILD
+++ b/core/conversion/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -34,9 +54,14 @@ cc_library(
"//core/ir",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/conversionctx/BUILD b/core/conversion/conversionctx/BUILD
index 0e910d7127..89ff7f613c 100644
--- a/core/conversion/conversionctx/BUILD
+++ b/core/conversion/conversionctx/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -29,9 +49,14 @@ cc_library(
"//core/ir",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/converters/BUILD b/core/conversion/converters/BUILD
index 062f700cab..9571d91604 100644
--- a/core/conversion/converters/BUILD
+++ b/core/conversion/converters/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -29,9 +49,14 @@ cc_library(
"//core/conversion/conversionctx",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
@@ -49,9 +74,14 @@ cc_library(
"//core/conversion/conversionctx",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
@@ -106,9 +136,14 @@ cc_library(
"//core/plugins:torch_tensorrt_plugins",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/evaluators/BUILD b/core/conversion/evaluators/BUILD
index 5116f3f76f..172e8f6670 100644
--- a/core/conversion/evaluators/BUILD
+++ b/core/conversion/evaluators/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -35,8 +55,13 @@ cc_library(
"//core/conversion/var",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/conversion/tensorcontainer/BUILD b/core/conversion/tensorcontainer/BUILD
index c07e8f5516..c812b74a9f 100644
--- a/core/conversion/tensorcontainer/BUILD
+++ b/core/conversion/tensorcontainer/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -28,9 +48,14 @@ cc_library(
deps = [
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/conversion/var/BUILD b/core/conversion/var/BUILD
index 7042a1f402..65d9583474 100644
--- a/core/conversion/var/BUILD
+++ b/core/conversion/var/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -31,9 +51,14 @@ cc_library(
"//core/conversion/tensorcontainer",
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/ir/BUILD b/core/ir/BUILD
index 1e4640f08f..d522a6a018 100644
--- a/core/ir/BUILD
+++ b/core/ir/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -31,9 +51,14 @@ cc_library(
deps = [
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/lowering/BUILD b/core/lowering/BUILD
index 2e9d236b05..e9b1e1ae07 100644
--- a/core/lowering/BUILD
+++ b/core/lowering/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -33,8 +53,13 @@ cc_library(
"//core/lowering/passes",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/lowering/passes/BUILD b/core/lowering/passes/BUILD
index 5a99139db3..459eaf25ea 100644
--- a/core/lowering/passes/BUILD
+++ b/core/lowering/passes/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -56,8 +76,13 @@ cc_library(
deps = [
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/partitioning/BUILD b/core/partitioning/BUILD
index 784f20c719..2cbcec34b1 100644
--- a/core/partitioning/BUILD
+++ b/core/partitioning/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -36,8 +56,13 @@ cc_library(
"//core/partitioning/segmentedblock",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/partitioning/partitioningctx/BUILD b/core/partitioning/partitioningctx/BUILD
index c595ca001d..011a48c6be 100644
--- a/core/partitioning/partitioningctx/BUILD
+++ b/core/partitioning/partitioningctx/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -32,8 +52,13 @@ cc_library(
"//core/partitioning/segmentedblock",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/partitioning/partitioninginfo/BUILD b/core/partitioning/partitioninginfo/BUILD
index 96194bf629..d2a86a2688 100644
--- a/core/partitioning/partitioninginfo/BUILD
+++ b/core/partitioning/partitioninginfo/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -31,8 +51,13 @@ cc_library(
"//core/lowering",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/partitioning/segmentedblock/BUILD b/core/partitioning/segmentedblock/BUILD
index 44cd1da98d..c463d17b92 100644
--- a/core/partitioning/segmentedblock/BUILD
+++ b/core/partitioning/segmentedblock/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -31,8 +51,13 @@ cc_library(
"//core/lowering",
"//core/util:prelude",
] + select({
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
alwayslink = True,
diff --git a/core/plugins/BUILD b/core/plugins/BUILD
index 2b7a28848f..ba167d5f2a 100644
--- a/core/plugins/BUILD
+++ b/core/plugins/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -38,9 +58,26 @@ cc_library(
deps = [
"//core/util:prelude",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@tensorrt_win//:nvinferplugin", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@tensorrt//:nvinferplugin", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@tensorrt//:nvinferplugin", "@libtorch"],
+ ":windows": [
+ "@tensorrt_win//:nvinfer",
+ "@tensorrt_win//:nvinferplugin",
+ ],
+ ":sbsa": [
+ "@tensorrt_sbsa//:nvinfer",
+ "@tensorrt_sbsa//:nvinferplugin",
+ ],
+ ":jetpack": [
+ "@tensorrt_l4t//:nvinfer",
+ "@tensorrt_l4t//:nvinferplugin",
+ ],
+ "//conditions:default": [
+ "@tensorrt//:nvinfer",
+ "@tensorrt//:nvinferplugin",
+ ],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/runtime/BUILD b/core/runtime/BUILD
index 5a8825b235..f30519619b 100644
--- a/core/runtime/BUILD
+++ b/core/runtime/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -43,18 +63,14 @@ cc_library(
"//core/plugins:torch_tensorrt_plugins",
"//core/util:prelude",
] + select({
- ":use_pre_cxx11_abi": [
- "@libtorch_pre_cxx11_abi//:libtorch",
- "@tensorrt//:nvinfer",
- ],
- ":windows": [
- "@libtorch_win//:libtorch",
- "@tensorrt_win//:nvinfer",
- ],
- "//conditions:default": [
- "@libtorch",
- "@tensorrt//:nvinfer",
- ],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/util/BUILD b/core/util/BUILD
index 56e5182160..bc9b53ec8d 100644
--- a/core/util/BUILD
+++ b/core/util/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -41,7 +61,7 @@ cc_library(
":macros",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -74,9 +94,9 @@ cc_library(
"build_info.h",
],
deps = select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
)
@@ -92,9 +112,14 @@ cc_library(
":macros",
"//core/util/logging",
] + select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/core/util/logging/BUILD b/core/util/logging/BUILD
index 4796d5def1..d29568cf97 100644
--- a/core/util/logging/BUILD
+++ b/core/util/logging/BUILD
@@ -4,9 +4,29 @@ load("@rules_pkg//:pkg.bzl", "pkg_tar")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -26,9 +46,14 @@ cc_library(
"TorchTRTLogger.h",
],
deps = select({
- ":windows": ["@tensorrt_win//:nvinfer", "@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@tensorrt//:nvinfer", "@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@tensorrt//:nvinfer", "@libtorch"],
+ ":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
+ "//conditions:default": ["@tensorrt//:nvinfer"],
+ }) + select({
+ ":windows": ["@libtorch_win//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
alwayslink = True,
)
diff --git a/cpp/bin/torchtrtc/BUILD b/cpp/bin/torchtrtc/BUILD
index 00e2490daa..51ee4ca2ab 100644
--- a/cpp/bin/torchtrtc/BUILD
+++ b/cpp/bin/torchtrtc/BUILD
@@ -3,9 +3,9 @@ load("@rules_cc//cc:defs.bzl", "cc_binary")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
@@ -37,11 +37,11 @@ cc_binary(
] + select({
":windows": [
"@libtorch_win//:caffe2",
- "@libtorch_win//:libtorch",
+ "@libtorch_win//:libtorch"
],
- ":use_pre_cxx11_abi": [
- "@libtorch_pre_cxx11_abi//:caffe2",
- "@libtorch_pre_cxx11_abi//:libtorch",
+ ":use_torch_whl": [
+ "@torch_whl//:caffe2",
+ "@torch_whl//:libtorch"
],
"//conditions:default": [
"@libtorch",
diff --git a/docker/README.md b/docker/README.md
index 3effebbfbf..3852e0e22a 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,19 +1,20 @@
# Building a Torch-TensorRT container
-* Use `Dockerfile` to build a container which provides the exact development environment that our main branch is usually tested against.
+- Use `Dockerfile` to build a container which provides the exact development environment that our main branch is usually tested against.
-* The `Dockerfile` currently uses Bazelisk to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in dependencies.
- * The desired versions of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
- * [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional.
- * [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.11`, though this is optional as well.
+- The `Dockerfile` currently uses Bazelisk to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in dependencies.
-* This `Dockerfile` installs `cxx11-abi` versions of Pytorch and builds Torch-TRT using `cxx11-abi` libtorch as well. As of torch 2.7, torch requires `cxx11-abi` for all CUDA 11.8, 12.4, 12.6, and later versions.
+ - The desired versions of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
+ - [**Optional**] The desired base image can be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional.
+ - [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.11`, though this is optional as well.
+
+- This `Dockerfile` installs `cxx11-abi` versions of Pytorch and builds Torch-TRT using `cxx11-abi` libtorch as well. As of torch 2.7, torch requires `cxx11-abi` for all CUDA 11.8, 12.4, 12.6, and later versions.
Note: By default the container uses the `cxx11-abi` version of Torch + Torch-TRT. If you are using a workflow that requires a build of PyTorch on the PRE CXX11 ABI, please add the Docker build argument: `--build-arg USE_PRE_CXX11_ABI=1`
### Dependencies
-* Install nvidia-docker by following https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker
+- Install nvidia-docker by following https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker
### Instructions
@@ -23,25 +24,27 @@ Note: By default the container uses the `cxx11-abi` version of Torch + Torch-TRT
> From root of Torch-TensorRT repo
Build:
+
```
DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.9.0 -f docker/Dockerfile -t torch_tensorrt:latest .
```
Run:
+
```
nvidia-docker run --gpus all -it --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" --name=torch_tensorrt --ipc=host --net=host torch_tensorrt:latest
```
Test:
-
You can run any converter test to verify if Torch-TRT built successfully inside the container. Once you launch the container, you can run
+
```
bazel test //tests/core/conversion/converters:test_activation --compilation_mode=opt --test_output=summary --config use_precompiled_torchtrt --config pre_cxx11_abi
```
-* `--config use_precompiled_torchtrt` : Indicates bazel to use pre-installed Torch-TRT library to test an application.
-* `--config pre_cxx11_abi` : This flag ensures `bazel test` uses `pre_cxx11_abi` version of `libtorch`. Use this flag corresponding to the ABI format of your Torch-TensorRT installation.
+- `--config use_precompiled_torchtrt` : Indicates bazel to use pre-installed Torch-TRT library to test an application.
+- `--config pre_cxx11_abi` : This flag ensures `bazel test` uses `pre_cxx11_abi` version of `libtorch`. Use this flag corresponding to the ABI format of your Torch-TensorRT installation.
### Pytorch NGC containers
diff --git a/docsrc/getting_started/jetpack.rst b/docsrc/getting_started/jetpack.rst
index ddbf89dc63..b99c45addd 100644
--- a/docsrc/getting_started/jetpack.rst
+++ b/docsrc/getting_started/jetpack.rst
@@ -1,4 +1,4 @@
-.. _Torch_TensorRT_in_JetPack_6.1
+.. _Torch_TensorRT_in_l4t_6.1
Overview
##################
@@ -116,4 +116,3 @@ Please make sure to build torch_tensorrt wheel file from source release/2.5 bran
cat toolchains/jp_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
# build and install torch_tensorrt wheel file
python setup.py install --user
-
diff --git a/examples/int8/benchmark/BUILD b/examples/int8/benchmark/BUILD
index 23d147c4c3..3b464b3a99 100644
--- a/examples/int8/benchmark/BUILD
+++ b/examples/int8/benchmark/BUILD
@@ -2,6 +2,13 @@ load("@rules_cc//cc:defs.bzl", "cc_library")
package(default_visibility = ["//visibility:public"])
+config_setting(
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
cc_library(
name = "benchmark",
srcs = [
@@ -13,7 +20,18 @@ cc_library(
],
deps = [
"//cpp:torch_tensorrt",
- "@libtorch",
- "@libtorch//:caffe2",
- ],
+ ] + select({
+ ":windows": [
+ "@libtorch_win//:libtorch",
+ "@libtorch_win//:caffe2",
+ ],
+ ":use_torch_whl": [
+ "@torch_whl//:libtorch",
+ "@torch_whl//:caffe2",
+ ],
+ "//conditions:default": [
+ "@libtorch//:libtorch",
+ "@libtorch//:caffe2",
+ ],
+ }),
)
diff --git a/examples/int8/ptq/BUILD b/examples/int8/ptq/BUILD
index c9fa200220..d30c7d3c03 100644
--- a/examples/int8/ptq/BUILD
+++ b/examples/int8/ptq/BUILD
@@ -2,6 +2,40 @@ load("@rules_cc//cc:defs.bzl", "cc_binary")
package(default_visibility = ["//visibility:public"])
+config_setting(
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
+ },
+)
+
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
cc_binary(
name = "ptq",
srcs = [
@@ -20,7 +54,22 @@ cc_binary(
"@libtorch",
"@libtorch//:caffe2",
] + select({
+ ":windows": [
+ "@libtorch_win//:libtorch",
+ "@libtorch_win//:caffe2",
+ ],
+ ":use_torch_whl": [
+ "@torch_whl//:libtorch",
+ "@torch_whl//:caffe2",
+ ],
+ "//conditions:default": [
+ "@libtorch//:libtorch",
+ "@libtorch//:caffe2",
+ ],
+ }) + select({
":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
"//conditions:default": ["@tensorrt//:nvinfer"],
})
)
diff --git a/examples/int8/qat/BUILD b/examples/int8/qat/BUILD
index e97398d932..0aab56a02a 100644
--- a/examples/int8/qat/BUILD
+++ b/examples/int8/qat/BUILD
@@ -2,6 +2,41 @@ load("@rules_cc//cc:defs.bzl", "cc_binary")
package(default_visibility = ["//visibility:public"])
+config_setting(
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
+ },
+)
+
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
+
cc_binary(
name = "qat",
srcs = [
@@ -20,7 +55,22 @@ cc_binary(
"@libtorch",
"@libtorch//:caffe2",
] + select({
+ ":windows": [
+ "@libtorch_win//:libtorch",
+ "@libtorch_win//:caffe2",
+ ],
+ ":use_torch_whl": [
+ "@torch_whl//:libtorch",
+ "@torch_whl//:caffe2",
+ ],
+ "//conditions:default": [
+ "@libtorch//:libtorch",
+ "@libtorch//:caffe2",
+ ],
+ }) + select({
":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
"//conditions:default": ["@tensorrt//:nvinfer"],
})
)
diff --git a/examples/torchtrt_runtime_example/BUILD b/examples/torchtrt_runtime_example/BUILD
index 957caedd23..7102a7f3db 100644
--- a/examples/torchtrt_runtime_example/BUILD
+++ b/examples/torchtrt_runtime_example/BUILD
@@ -2,6 +2,34 @@ load("@rules_cc//cc:defs.bzl", "cc_binary")
package(default_visibility = ["//visibility:public"])
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
+ },
+)
+
+config_setting(
+ name = "windows",
+ constraint_values = [
+ "@platforms//os:windows",
+ ],
+)
+
+
cc_binary(
name = "torchtrt_runtime_example",
srcs = [
@@ -13,6 +41,8 @@ cc_binary(
"@libtorch//:caffe2",
] + select({
":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
"//conditions:default": ["@tensorrt//:nvinfer"],
})
)
diff --git a/packaging/pre_build_script.sh b/packaging/pre_build_script.sh
index 173498201f..edc4ff8b76 100755
--- a/packaging/pre_build_script.sh
+++ b/packaging/pre_build_script.sh
@@ -5,10 +5,20 @@ set -x
# Install dependencies
python3 -m pip install pyyaml
-yum install -y ninja-build gettext
+yum install -y ninja-build gettext
-wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
- && mv bazelisk-linux-amd64 /usr/bin/bazel \
+# bazelisk release artifacts use amd64/arm64 naming (not x86_64/aarch64)
+PLATFORM=x86_64
+BAZEL_PLATFORM=amd64
+if [[ $(uname -m) == "aarch64" ]]; then
+ PLATFORM=aarch64
+ BAZEL_PLATFORM=arm64
+
+ rm -rf /opt/openssl # Not sure whats up with the openssl mismatch
+fi
+
+wget https://github.com/bazelbuild/bazelisk/releases/download/v1.25.0/bazelisk-linux-${BAZEL_PLATFORM} \
+ && mv bazelisk-linux-${BAZEL_PLATFORM} /usr/bin/bazel \
&& chmod +x /usr/bin/bazel
TORCH_TORCHVISION=$(grep "^torch" py/requirements.txt)
diff --git a/setup.py b/setup.py
index 09933307c8..3798a20fcd 100644
--- a/setup.py
+++ b/setup.py
@@ -79,12 +79,13 @@ def load_dep_info():
dir_path = os.path.join(str(get_root_dir()), "py")
PRE_CXX11_ABI = False
-JETPACK_VERSION = None
+IS_JETPACK = False
PY_ONLY = False
NO_TS = False
LEGACY = False
RELEASE = False
CI_BUILD = False
+IS_SBSA = False
if "--fx-only" in sys.argv:
PY_ONLY = True
@@ -121,6 +122,9 @@ def load_dep_info():
if (gpu_arch_version := os.environ.get("CU_VERSION")) is None:
gpu_arch_version = f"cu{__cuda_version__.replace('.','')}"
+if (jetpack := os.environ.get("JETPACK_BUILD")) is not None:
+ if jetpack == "1":
+ IS_JETPACK = True
if RELEASE:
__version__ = os.environ.get("BUILD_VERSION")
@@ -136,40 +140,12 @@ def load_dep_info():
if ci_env_var == "1":
CI_BUILD = True
-if "--use-pre-cxx11-abi" in sys.argv:
- sys.argv.remove("--use-pre-cxx11-abi")
- PRE_CXX11_ABI = True
-
-if (pre_cxx11_abi_env_var := os.environ.get("USE_PRE_CXX11_ABI")) is not None:
- if pre_cxx11_abi_env_var == "1":
- PRE_CXX11_ABI = True
-
if platform.uname().processor == "aarch64":
- if "--jetpack-version" in sys.argv:
- version_idx = sys.argv.index("--jetpack-version") + 1
- version = sys.argv[version_idx]
- sys.argv.remove(version)
- sys.argv.remove("--jetpack-version")
- if version == "4.5":
- JETPACK_VERSION = "4.5"
- elif version == "4.6":
- JETPACK_VERSION = "4.6"
- elif version == "5.0":
- JETPACK_VERSION = "5.0"
- elif version == "6.1":
- JETPACK_VERSION = "6.1"
-
- if not JETPACK_VERSION:
- warnings.warn(
- "Assuming jetpack version to be 6.1, if not use the --jetpack-version option"
- )
- JETPACK_VERSION = "6.1"
-
- if PRE_CXX11_ABI:
- warnings.warn(
- "Jetson platform detected. Please remove --use-pre-cxx11-abi flag if you are using it."
- )
-
+ if "--jetpack" in sys.argv:
+ sys.argv.remove("--jetpack")
+ IS_JETPACK = True
+ else:
+ IS_SBSA = True
BAZEL_EXE = None
if not PY_ONLY:
@@ -204,30 +180,17 @@ def build_libtorchtrt_cxx11_abi(
if target_python:
cmd.append("--config=python")
- if pre_cxx11_abi:
- cmd.append("--config=pre_cxx11_abi")
- print("using PRE CXX11 ABI build")
- else:
- cmd.append("--config=cxx11_abi")
- print("using CXX11 ABI build")
-
if IS_WINDOWS:
cmd.append("--config=windows")
else:
cmd.append("--config=linux")
- if JETPACK_VERSION == "4.5":
- cmd.append("--platforms=//toolchains:jetpack_4.5")
- print("Jetpack version: 4.5")
- elif JETPACK_VERSION == "4.6":
- cmd.append("--platforms=//toolchains:jetpack_4.6")
- print("Jetpack version: 4.6")
- elif JETPACK_VERSION == "5.0":
- cmd.append("--platforms=//toolchains:jetpack_5.0")
- print("Jetpack version: 5.0")
- elif JETPACK_VERSION == "6.1":
- cmd.append("--platforms=//toolchains:jetpack_6.1")
- print("Jetpack version: 6.1")
+ if IS_JETPACK:
+ cmd.append("--config=jetpack")
+
+ if IS_SBSA:
+ if CI_BUILD:
+ cmd.append("--//toolchains/dep_src:torch=whl")
if CI_BUILD:
cmd.append("--platforms=//toolchains:ci_rhel_x86_64_linux")
@@ -497,7 +460,8 @@ def run(self):
package_data = {}
if not (PY_ONLY or NO_TS):
- tensorrt_linux_external_dir = (
+
+ tensorrt_x86_64_external_dir = (
lambda: subprocess.check_output(
[BAZEL_EXE, "query", "@tensorrt//:nvinfer", "--output", "location"]
)
@@ -505,6 +469,32 @@ def run(self):
.strip()
.split("/BUILD.bazel")[0]
)
+
+ tensorrt_sbsa_external_dir = (
+ lambda: subprocess.check_output(
+ [BAZEL_EXE, "query", "@tensorrt_sbsa//:nvinfer", "--output", "location"]
+ )
+ .decode("ascii")
+ .strip()
+ .split("/BUILD.bazel")[0]
+ )
+
+ tensorrt_jetpack_external_dir = (
+ lambda: subprocess.check_output(
+ [BAZEL_EXE, "query", "@tensorrt_l4t//:nvinfer", "--output", "location"]
+ )
+ .decode("ascii")
+ .strip()
+ .split("/BUILD.bazel")[0]
+ )
+
+ if IS_SBSA:
+ tensorrt_linux_external_dir = tensorrt_sbsa_external_dir
+ elif IS_JETPACK:
+ tensorrt_linux_external_dir = tensorrt_jetpack_external_dir
+ else:
+ tensorrt_linux_external_dir = tensorrt_x86_64_external_dir
+
tensorrt_windows_external_dir = (
lambda: subprocess.check_output(
[BAZEL_EXE, "query", "@tensorrt_win//:nvinfer", "--output", "location"]
diff --git a/tests/core/BUILD b/tests/core/BUILD
index cc18d03ea7..a0e19fa232 100644
--- a/tests/core/BUILD
+++ b/tests/core/BUILD
@@ -1,9 +1,29 @@
load("@rules_cc//cc:defs.bzl", "cc_test")
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
+ },
+)
+
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
},
)
@@ -30,7 +50,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
diff --git a/tests/core/conversion/converters/BUILD b/tests/core/conversion/converters/BUILD
index 05ad6e356c..dfab90a978 100644
--- a/tests/core/conversion/converters/BUILD
+++ b/tests/core/conversion/converters/BUILD
@@ -1,9 +1,9 @@
load("//tests/core/conversion/converters:converter_test.bzl", "converter_test")
config_setting(
- name = "use_pre_cxx11_abi",
+ name = "use_torch_whl",
-    values = {
-        "define": "abi=pre_cxx11_abi",
+    flag_values = {
+        "//toolchains/dep_src:torch": "whl"
     },
)
diff --git a/tests/core/conversion/evaluators/BUILD b/tests/core/conversion/evaluators/BUILD
index 4059b97df3..9b2a708297 100644
--- a/tests/core/conversion/evaluators/BUILD
+++ b/tests/core/conversion/evaluators/BUILD
@@ -1,9 +1,9 @@
load("//tests/core/conversion/evaluators:evaluator_test.bzl", "evaluator_test")
config_setting(
- name = "use_pre_cxx11_abi",
+ name = "use_torch_whl",
-    values = {
-        "define": "abi=pre_cxx11_abi",
+    flag_values = {
+        "//toolchains/dep_src:torch": "whl"
     },
)
diff --git a/tests/core/lowering/BUILD b/tests/core/lowering/BUILD
index b2d3609ccf..2de6b6a7f7 100644
--- a/tests/core/lowering/BUILD
+++ b/tests/core/lowering/BUILD
@@ -2,9 +2,9 @@ load("@rules_cc//cc:defs.bzl", "cc_test")
load("//tests/core/lowering:lowering_test.bzl", "lowering_test")
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
diff --git a/tests/core/lowering/lowering_test.bzl b/tests/core/lowering/lowering_test.bzl
index ecdac73778..9d2c38a969 100644
--- a/tests/core/lowering/lowering_test.bzl
+++ b/tests/core/lowering/lowering_test.bzl
@@ -20,8 +20,8 @@ def lowering_test(name, visibility = None):
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
timeout = "short",
)
diff --git a/tests/core/partitioning/BUILD b/tests/core/partitioning/BUILD
index ed2328897c..b2f99dbd34 100644
--- a/tests/core/partitioning/BUILD
+++ b/tests/core/partitioning/BUILD
@@ -2,9 +2,9 @@ load("@rules_cc//cc:defs.bzl", "cc_test")
load("//tests/core/partitioning:partitioning_test.bzl", "partitioning_test")
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
@@ -15,6 +15,7 @@ config_setting(
],
)
+
filegroup(
name = "jit_models",
srcs = [
@@ -62,7 +63,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -78,7 +79,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -94,7 +95,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -110,7 +111,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
diff --git a/tests/core/partitioning/partitioning_test.bzl b/tests/core/partitioning/partitioning_test.bzl
index 322721aa93..f89dd22f40 100644
--- a/tests/core/partitioning/partitioning_test.bzl
+++ b/tests/core/partitioning/partitioning_test.bzl
@@ -20,8 +20,8 @@ def partitioning_test(name, visibility = None):
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
#timeout = "short",
)
diff --git a/tests/core/runtime/BUILD b/tests/core/runtime/BUILD
index d25379b44d..cd02a54b40 100644
--- a/tests/core/runtime/BUILD
+++ b/tests/core/runtime/BUILD
@@ -3,9 +3,9 @@ load("//tests/core/runtime:runtime_test.bzl", "runtime_test")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
@@ -16,6 +16,7 @@ config_setting(
],
)
+
runtime_test(
name = "test_multi_device_safe_mode",
)
diff --git a/tests/core/runtime/runtime_test.bzl b/tests/core/runtime/runtime_test.bzl
index 021321c40b..3363edb627 100644
--- a/tests/core/runtime/runtime_test.bzl
+++ b/tests/core/runtime/runtime_test.bzl
@@ -20,7 +20,7 @@ def runtime_test(name, visibility = None):
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
- "//conditions:default": ["@libtorch//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
+ "//conditions:default": ["@libtorch"],
}),
)
diff --git a/tests/cpp/BUILD b/tests/cpp/BUILD
index 2917f57130..b50a3c6783 100644
--- a/tests/cpp/BUILD
+++ b/tests/cpp/BUILD
@@ -3,9 +3,9 @@ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
@@ -94,7 +94,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -133,7 +133,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -149,7 +149,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -162,7 +162,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -178,7 +178,7 @@ cc_test(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
@@ -214,7 +214,7 @@ cc_library(
"@googletest//:gtest_main",
] + select({
":windows": ["@libtorch_win//:libtorch"],
- ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+ ":use_torch_whl": ["@torch_whl//:libtorch"],
"//conditions:default": ["@libtorch"],
}),
)
diff --git a/tests/util/BUILD b/tests/util/BUILD
index c49b88acf3..38e4b5ff6d 100644
--- a/tests/util/BUILD
+++ b/tests/util/BUILD
@@ -3,9 +3,9 @@ load("@rules_cc//cc:defs.bzl", "cc_library")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "use_pre_cxx11_abi",
- values = {
- "define": "abi=pre_cxx11_abi",
+ name = "use_torch_whl",
+ flag_values = {
+ "//toolchains/dep_src:torch": "whl"
},
)
@@ -16,6 +16,26 @@ config_setting(
],
)
+config_setting(
+ name = "sbsa",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "default"
+ },
+)
+
+config_setting(
+ name = "jetpack",
+ constraint_values = [
+ "@platforms//cpu:aarch64",
+ ],
+ flag_values = {
+ "//toolchains/dep_collection:compute_libs": "jetpack"
+ },
+)
+
config_setting(
name = "ci_build_testing",
values = {
@@ -39,15 +59,17 @@ cc_library(
"@googletest//:gtest_main",
] + select({
":windows": ["@tensorrt_win//:nvinfer"],
+ ":sbsa": ["@tensorrt_sbsa//:nvinfer"],
+ ":jetpack": ["@tensorrt_l4t//:nvinfer"],
"//conditions:default": ["@tensorrt//:nvinfer"],
}) + select({
":windows": [
"@libtorch_win//:caffe2",
"@libtorch_win//:libtorch",
],
- ":use_pre_cxx11_abi": [
- "@libtorch_pre_cxx11_abi//:caffe2",
- "@libtorch_pre_cxx11_abi//:libtorch",
+ ":use_torch_whl": [
+ "@torch_whl//:caffe2",
+ "@torch_whl//:libtorch",
],
"//conditions:default": [
"@libtorch",
diff --git a/third_party/tensorrt/archive/BUILD b/third_party/tensorrt/archive/BUILD
index c9b32dc23a..4aef027d12 100644
--- a/third_party/tensorrt/archive/BUILD
+++ b/third_party/tensorrt/archive/BUILD
@@ -3,11 +3,13 @@ load("@rules_cc//cc:defs.bzl", "cc_import", "cc_library")
package(default_visibility = ["//visibility:public"])
config_setting(
- name = "aarch64_linux",
+ name = "jetpack",
constraint_values = [
"@platforms//cpu:aarch64",
- "@platforms//os:linux",
],
+ flag_values = {
+ "@//toolchains/dep_collection:compute_libs": "jetpack"
+ },
)
config_setting(
@@ -58,6 +60,7 @@ cc_library(
"nvinfer_lib",
] + select({
":windows": ["@cuda_win//:cudart", "nvinfer_static_lib"],
+ ":jetpack": ["@cuda_l4t//:cudart", "nvinfer_static_lib"],
"//conditions:default": ["@cuda//:cudart"],
}),
)
@@ -200,6 +203,7 @@ cc_library(
"nvinfer",
] + select({
":windows": ["@cuda_win//:cudart"],
+ ":jetpack": ["@cuda_l4t//:cudart"],
"//conditions:default": ["@cuda//:cudart"],
}),
alwayslink = True,
diff --git a/toolchains/ci_workspaces/MODULE.bazel.tmpl b/toolchains/ci_workspaces/MODULE.bazel.tmpl
index e7a651c92d..dde6edc651 100644
--- a/toolchains/ci_workspaces/MODULE.bazel.tmpl
+++ b/toolchains/ci_workspaces/MODULE.bazel.tmpl
@@ -4,10 +4,10 @@ module(
version = "${BUILD_VERSION}"
)
-bazel_dep(name = "googletest", version = "1.14.0")
-bazel_dep(name = "platforms", version = "0.0.10")
-bazel_dep(name = "rules_cc", version = "0.0.9")
-bazel_dep(name = "rules_python", version = "0.34.0")
+bazel_dep(name = "googletest", version = "1.16.0")
+bazel_dep(name = "platforms", version = "0.0.11")
+bazel_dep(name = "rules_cc", version = "0.1.1")
+bazel_dep(name = "rules_python", version = "1.3.0")
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
python.toolchain(
@@ -27,7 +27,7 @@ local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl"
# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
name = "torch_tensorrt",
- path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
+ path = "/opt/conda/lib/python3.10/site-packages/torch_tensorrt",
)
@@ -40,6 +40,15 @@ new_local_repository(
path = "${CUDA_HOME}",
)
+# Server Arm (SBSA) and Jetson Jetpack (L4T) use different versions of CUDA and TensorRT
+# These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
+
+new_local_repository(
+ name = "cuda_l4t",
+ build_file = "@//third_party/cuda:BUILD",
+ path = "/usr/local/cuda-12.6",
+)
+
new_local_repository(
name = "cuda_win",
build_file = "@//third_party/cuda:BUILD",
@@ -53,12 +62,31 @@ http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "ht
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
#############################################################################################################
-http_archive(
- name = "libtorch",
- build_file = "@//third_party/libtorch:BUILD",
- strip_prefix = "libtorch",
- urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
-)
+# http_archive(
+# name = "libtorch",
+# build_file = "@//third_party/libtorch:BUILD",
+# strip_prefix = "libtorch",
+# urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
+# )
+
+# http_archive(
+# name = "libtorch_win",
+# build_file = "@//third_party/libtorch:BUILD",
+# strip_prefix = "libtorch",
+# urls = ["https://download.pytorch.org/libtorch//${CHANNEL}/${CU_VERSION}/libtorch-win-shared-with-deps-latest.zip"],
+# )
+
+
+# It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
+# using the build flag `--//toolchains/dep_src:torch="whl"`
+
+# http_archive(
+# name = "torch_whl",
+# build_file = "@//third_party/libtorch:BUILD",
+# strip_prefix = "torch",
+# type = "zip",
+# urls = ["file:///${TORCH_WHL_PATH}"],
+# )
# Download these tarballs manually from the NVIDIA website
# Either place them in the distdir directory in third_party and use the --distdir flag
@@ -73,6 +101,24 @@ http_archive(
],
)
+http_archive(
+ name = "tensorrt_sbsa",
+ build_file = "@//third_party/tensorrt/archive:BUILD",
+ strip_prefix = "TensorRT-10.9.0.34",
+ urls = [
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.9.0/tars/TensorRT-10.9.0.34.Linux.aarch64-gnu.cuda-12.8.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "tensorrt_l4t",
+ build_file = "@//third_party/tensorrt/archive:BUILD",
+ strip_prefix = "TensorRT-10.3.0.26",
+ urls = [
+ "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
+ ],
+)
+
http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
@@ -95,13 +141,13 @@ http_archive(
# for both versions here and do not use --config=pre-cxx11-abi
new_local_repository(
- name = "libtorch_win",
+ name = "libtorch",
path = "${TORCH_INSTALL_PATH}",
build_file = "third_party/libtorch/BUILD"
)
new_local_repository(
- name = "libtorch_pre_cxx11_abi",
+ name = "libtorch_win",
path = "${TORCH_INSTALL_PATH}",
build_file = "third_party/libtorch/BUILD"
)
diff --git a/toolchains/dep_collection/BUILD b/toolchains/dep_collection/BUILD
new file mode 100644
index 0000000000..4c0dff784c
--- /dev/null
+++ b/toolchains/dep_collection/BUILD
@@ -0,0 +1,7 @@
+package(default_visibility = ["//visibility:public"])
+
+load(":defs.bzl", "dep_collection")
+dep_collection(
+ name = "compute_libs",
+ build_setting_default = "default",
+)
diff --git a/toolchains/dep_collection/defs.bzl b/toolchains/dep_collection/defs.bzl
new file mode 100644
index 0000000000..6eaa710261
--- /dev/null
+++ b/toolchains/dep_collection/defs.bzl
@@ -0,0 +1,18 @@
+# buildifier: disable=module-docstring
+DependencyCollectionInfo = provider(doc = "", fields = ["type"])
+
+collection_types = ["default", "jetpack"]
+
+def _impl(ctx):
+ _type = ctx.build_setting_value
+ if _type not in collection_types:
+ fail(str(ctx.label) + " build setting allowed to take values {" +
+ ", ".join(collection_types) + "} but was set to unallowed value " +
+ _type)
+
+ return DependencyCollectionInfo(type = _type)
+
+dep_collection = rule(
+ implementation = _impl,
+ build_setting = config.string(flag = True),
+)
diff --git a/toolchains/dep_src/BUILD b/toolchains/dep_src/BUILD
new file mode 100644
index 0000000000..b51ef75b78
--- /dev/null
+++ b/toolchains/dep_src/BUILD
@@ -0,0 +1,7 @@
+package(default_visibility = ["//visibility:public"])
+
+load(":defs.bzl", "dep_src")
+dep_src(
+ name = "torch",
+ build_setting_default = "archive",
+)
diff --git a/toolchains/dep_src/defs.bzl b/toolchains/dep_src/defs.bzl
new file mode 100644
index 0000000000..bec0efb8cf
--- /dev/null
+++ b/toolchains/dep_src/defs.bzl
@@ -0,0 +1,18 @@
+# buildifier: disable=module-docstring
+DepSrcInfo = provider(doc = "", fields = ["type"])
+
+src_types = ["archive", "whl", "local"]
+
+def _impl(ctx):
+ src = ctx.build_setting_value
+ if src not in src_types:
+ fail(str(ctx.label) + " build setting allowed to take values {" +
+ ", ".join(src_types) + "} but was set to unallowed value " +
+ src)
+
+ return DepSrcInfo(type = src)
+
+dep_src = rule(
+ implementation = _impl,
+ build_setting = config.string(flag = True),
+)
diff --git a/toolchains/jetpack/BUILD b/toolchains/jetpack/BUILD
index ed37a864b6..b51796bb3d 100644
--- a/toolchains/jetpack/BUILD
+++ b/toolchains/jetpack/BUILD
@@ -15,4 +15,4 @@ constraint_value(
constraint_value(
name = "6.1",
constraint_setting = ":jetpack",
-)
\ No newline at end of file
+)
diff --git a/version.txt b/version.txt
index 787e6e4ab7..11922a5ce1 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-2.7.0a0
+2.8.0a0