diff --git a/.github/workflows/docgen.yml b/.github/workflows/docgen.yml
index 65cecab407..5805a563f5 100644
--- a/.github/workflows/docgen.yml
+++ b/.github/workflows/docgen.yml
@@ -42,7 +42,7 @@ jobs:
run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
- name: Build Python Package
env:
- USE_CXX11_ABI: 1
+ USE_PRE_CXX11_ABI: 0
run: |
python3 -m pip install --pre . --extra-index-url https://download.pytorch.org/whl/nightly/cu126
- name: Generate New Docs
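The workflow above builds against the default CXX11 ABI; the environment variable is kept only to make that choice explicit. As a hedged aside (not part of this diff), you can confirm which ABI the installed nightly torch wheel uses before deciding whether the pre-CXX11 path is needed at all:

```sh
# Check the ABI of the installed torch wheel (torch.compiled_with_cxx11_abi is a stock PyTorch API).
python3 -c "import torch; print(torch.compiled_with_cxx11_abi())"
# True  -> the default CXX11 ABI build is correct; leave USE_PRE_CXX11_ABI unset or 0
# False -> build with the pre-CXX11 ABI instead, e.g. USE_PRE_CXX11_ABI=1
```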
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 930321614b..76cda3827f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -75,7 +75,7 @@ Environment variables supported by nox
```
PYT_PATH - To use different PYTHONPATH than system installed Python packages
TOP_DIR - To set the root directory of the noxfile
-USE_CXX11 - To use cxx11_abi (Defaults to 0)
+USE_PRE_CXX11 - To use pre_cxx11_abi (Defaults to 0)
USE_HOST_DEPS - To use host dependencies for tests (Defaults to 0)
```
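As a minimal sketch of how these variables combine on the command line (the session name is a placeholder, not taken from the noxfile):

```sh
# Placeholder session name; substitute one reported by `python3 -m nox --list`.
USE_PRE_CXX11=1 USE_HOST_DEPS=1 python3 -m nox --session "$SESSION_NAME"
```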
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 993f9d2a67..3fff741f11 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -12,8 +12,8 @@ RUN test -n "$TENSORRT_VERSION" || (echo "No tensorrt version specified, please
ARG PYTHON_VERSION=3.10
ENV PYTHON_VERSION=${PYTHON_VERSION}
-ARG USE_CXX11_ABI
-ENV USE_CXX11=${USE_CXX11_ABI}
+ARG USE_PRE_CXX11_ABI
+ENV USE_PRE_CXX11=${USE_PRE_CXX11_ABI}
ENV DEBIAN_FRONTEND=noninteractive
# Install basic dependencies
diff --git a/docker/README.md b/docker/README.md
index 7435973b1a..0037199990 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,15 +1,15 @@
# Building a Torch-TensorRT container
-* Use `Dockerfile` to build a container which provides the exact development environment that our master branch is usually tested against.
+* Use `Dockerfile` to build a container which provides the exact development environment that our main branch is usually tested against.
* The `Dockerfile` currently uses Bazelisk to select the Bazel version, and uses the exact library versions of Torch and CUDA listed in dependencies.
* The desired versions of TensorRT must be specified as build-args, with major and minor versions as in: `--build-arg TENSORRT_VERSION=a.b`
- * [**Optional**] The desired base image be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional
+ * [**Optional**] The desired base image can be changed by explicitly setting a base image, as in `--build-arg BASE_IMG=nvidia/cuda:11.8.0-devel-ubuntu22.04`, though this is optional.
* [**Optional**] Additionally, the desired Python version can be changed by explicitly setting a version, as in `--build-arg PYTHON_VERSION=3.10`, though this is optional as well.
-* This `Dockerfile` installs `pre-cxx11-abi` versions of Pytorch and builds Torch-TRT using `pre-cxx11-abi` libtorch as well.
+* This `Dockerfile` installs `cxx11-abi` versions of PyTorch and builds Torch-TRT using `cxx11-abi` libtorch as well. As of Torch 2.7, the `cxx11-abi` is required for all CUDA 11.8, 12.4, and 12.6 builds.
-Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch-TRT. If you are using a workflow that requires a build of PyTorch on the CXX11 ABI (e.g. using the PyTorch NGC containers as a base image), add the Docker build argument: `--build-arg USE_CXX11_ABI=1`
+Note: By default the container uses the `cxx11-abi` version of Torch + Torch-TRT. If you are using a workflow that requires a build of PyTorch on the pre-CXX11 ABI, add the Docker build argument: `--build-arg USE_PRE_CXX11_ABI=1`
### Dependencies
@@ -17,14 +17,14 @@ Note: By default the container uses the `pre-cxx11-abi` version of Torch + Torch
### Instructions
-- The example below uses TensorRT 10.6.0.26
+- The example below uses TensorRT 10.7.0.23
- See dependencies for a list of current default dependencies.
> From root of Torch-TensorRT repo
Build:
```
-DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.6.0 -f docker/Dockerfile -t torch_tensorrt:latest .
+DOCKER_BUILDKIT=1 docker build --build-arg TENSORRT_VERSION=10.7.0 -f docker/Dockerfile -t torch_tensorrt:latest .
```
Run:
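As a hedged companion to the build command in the README hunk above (the image tag is illustrative, not from this diff), a pre-CXX11 build of the container would be requested like this:

```sh
# Opt into the pre-CXX11 ABI only for workflows that still need a pre-CXX11 PyTorch build.
DOCKER_BUILDKIT=1 docker build \
  --build-arg TENSORRT_VERSION=10.7.0 \
  --build-arg USE_PRE_CXX11_ABI=1 \
  -f docker/Dockerfile -t torch_tensorrt:pre-cxx11 .
```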
diff --git a/docker/dist-build.sh b/docker/dist-build.sh
index 04d2e8b84a..00ce6882c1 100755
--- a/docker/dist-build.sh
+++ b/docker/dist-build.sh
@@ -4,10 +4,10 @@ set -x
TOP_DIR=$(cd $(dirname $0); pwd)/..
-if [[ -z "${USE_CXX11}" ]]; then
+if [[ -z "${USE_PRE_CXX11}" ]]; then
BUILD_CMD="python -m pip wheel . --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist"
else
- BUILD_CMD="python -m pip wheel . --config-setting="--build-option=--use-cxx11-abi" --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist"
+ BUILD_CMD="python -m pip wheel . --config-setting="--build-option=--use-pre-cxx11-abi" --extra-index-url https://download.pytorch.org/whl/nightly/cu124 -w dist"
fi
# TensorRT restricts our pip version
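A short sketch of driving this script with the renamed variable (assuming it is invoked from the repository root, which the script itself does not require):

```sh
# Default: CXX11 ABI wheel
bash docker/dist-build.sh
# Opt into the pre-CXX11 ABI wheel
USE_PRE_CXX11=1 bash docker/dist-build.sh
```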
diff --git a/docsrc/getting_started/installation.rst b/docsrc/getting_started/installation.rst
index 6ef093e839..3509860837 100644
--- a/docsrc/getting_started/installation.rst
+++ b/docsrc/getting_started/installation.rst
@@ -235,13 +235,13 @@ recommended commands:
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
| libtorch-shared-with-deps-*.zip from PyTorch.org | python -m pip install . | bazel build //:libtorchtrt -c opt \-\-config pre_cxx11_abi |
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
-| libtorch-cxx11-abi-shared-with-deps-*.zip from PyTorch.org | python setup.py bdist_wheel --use-cxx11-abi | bazel build //:libtorchtrt -c opt |
+| libtorch-cxx11-abi-shared-with-deps-*.zip from PyTorch.org | python setup.py bdist_wheel | bazel build //:libtorchtrt -c opt |
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
-| PyTorch preinstalled in an NGC container | python setup.py bdist_wheel --use-cxx11-abi | bazel build //:libtorchtrt -c opt |
+| PyTorch preinstalled in an NGC container | python setup.py bdist_wheel | bazel build //:libtorchtrt -c opt |
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
-| PyTorch from the NVIDIA Forums for Jetson | python setup.py bdist_wheel --use-cxx11-abi | bazel build //:libtorchtrt -c opt |
+| PyTorch from the NVIDIA Forums for Jetson | python setup.py bdist_wheel | bazel build //:libtorchtrt -c opt |
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
-| PyTorch built from Source | python setup.py bdist_wheel --use-cxx11-abi | bazel build //:libtorchtrt -c opt |
+| PyTorch built from Source | python setup.py bdist_wheel | bazel build //:libtorchtrt -c opt |
+-------------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------------------+
NOTE: For all of the above cases you must correctly declare the source of PyTorch you intend to use in your WORKSPACE file for both Python and C++ builds. See below for more information
@@ -383,8 +383,8 @@ Compile the Python API using the following command from the ``//py`` directory:
.. code-block:: shell
- python3 setup.py install --use-cxx11-abi
+ python3 setup.py install
-If you have a build of PyTorch that uses Pre-CXX11 ABI drop the ``--use-cxx11-abi`` flag
+If you have a build of PyTorch that uses the Pre-CXX11 ABI, add the ``--use-pre-cxx11-abi`` flag
If you are building for Jetpack 4.5 add the ``--jetpack-version 5.0`` flag
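To summarize the two Python API build paths after this change (a sketch using only the flags introduced in this diff):

```sh
# Default: PyTorch/libtorch built with the CXX11 ABI
python3 setup.py install
# Only if your PyTorch/libtorch build uses the pre-CXX11 ABI
python3 setup.py install --use-pre-cxx11-abi
```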
diff --git a/docsrc/getting_started/jetpack.rst b/docsrc/getting_started/jetpack.rst
index f1b648fc3d..ddbf89dc63 100644
--- a/docsrc/getting_started/jetpack.rst
+++ b/docsrc/getting_started/jetpack.rst
@@ -115,5 +115,5 @@ Please make sure to build torch_tensorrt wheel file from source release/2.5 bran
# replace the MODULE.bazel with the jetpack one
cat toolchains/jp_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
# build and install torch_tensorrt wheel file
- python setup.py --use-cxx11-abi install --user
+ python setup.py install --user
diff --git a/noxfile.py b/noxfile.py
index 9a5c3263bb..478de6c5da 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -21,10 +21,10 @@
)
print(f"Test root directory {TOP_DIR}")
-# Set the USE_CXX11=1 to use cxx11_abi
-USE_CXX11 = 0 if not "USE_CXX11" in os.environ else os.environ["USE_CXX11"]
-if USE_CXX11:
- print("Using cxx11 abi")
+# Set the USE_PRE_CXX11=1 to use pre_cxx11_abi
+USE_PRE_CXX11 = 0 if "USE_PRE_CXX11" not in os.environ else os.environ["USE_PRE_CXX11"]
+if USE_PRE_CXX11:
+ print("Using pre cxx11 abi")
# Set the USE_HOST_DEPS=1 to use host dependencies for tests
USE_HOST_DEPS = 0 if not "USE_HOST_DEPS" in os.environ else os.environ["USE_HOST_DEPS"]
@@ -61,8 +61,8 @@ def download_models(session):
def install_torch_trt(session):
print("Installing latest torch-tensorrt build")
session.chdir(os.path.join(TOP_DIR, "py"))
- if USE_CXX11:
- session.run("python", "setup.py", "develop", "--use-cxx11-abi")
+ if USE_PRE_CXX11:
+ session.run("python", "setup.py", "develop", "--use-pre-cxx11-abi")
else:
session.run("python", "setup.py", "develop")
diff --git a/packaging/env_vars.txt b/packaging/env_vars.txt
index 44a2350e0f..46f906b1ff 100644
--- a/packaging/env_vars.txt
+++ b/packaging/env_vars.txt
@@ -1,3 +1,2 @@
export CI_BUILD="1"
-export RELEASE="1"
-export USE_CXX11_ABI="1"
\ No newline at end of file
+export RELEASE="1"
\ No newline at end of file
diff --git a/py/README.md b/py/README.md
index 45c68ff98f..8da67dd74d 100644
--- a/py/README.md
+++ b/py/README.md
@@ -29,16 +29,16 @@ torch.jit.save(trt_ts_module, "trt_torchscript_module.ts") # save the TRT embedd
## Installation
-| ABI / Platform | Installation command |
-| --------------------------------------- | ------------------------------------------------------------ |
-| Pre CXX11 ABI (Linux x86_64) | python3 setup.py install |
-| CXX ABI (Linux x86_64) | python3 setup.py install --use-cxx11-abi |
-| Pre CXX11 ABI (Jetson platform aarch64) | python3 setup.py install --jetpack-version 4.6 |
-| CXX11 ABI (Jetson platform aarch64) | python3 setup.py install --jetpack-version 4.6 --use-cxx11-abi |
+| ABI / Platform | Installation command |
+| --------------------------------------- | ----------------------------------------------------------------- |
+| Pre CXX11 ABI (Linux x86_64) | python3 setup.py install --use-pre-cxx11-abi |
+| CXX11 ABI (Linux x86_64)                 | python3 setup.py install                                            |
+| Pre CXX11 ABI (Jetson platform aarch64)  | python3 setup.py install --jetpack-version 4.6 --use-pre-cxx11-abi |
+| CXX11 ABI (Jetson platform aarch64) | python3 setup.py install --jetpack-version 4.6 |
-For Linux x86_64 platform, Pytorch libraries default to pre cxx11 abi. So, please use `python3 setup.py install`.
+For the Linux x86_64 platform, PyTorch libraries now default to the CXX11 ABI, so please use `python3 setup.py install`.
-On Jetson platforms, NVIDIA hosts pre-built Pytorch wheel files. These wheel files are built with CXX11 ABI. So on jetson platforms, please use `python3 setup.py install --jetpack-version 4.6 --use-cxx11-abi`
+On Jetson platforms, NVIDIA hosts pre-built PyTorch wheel files. These wheel files are built with the CXX11 ABI, so on Jetson platforms please use `python3 setup.py install --jetpack-version 4.6`
## Under the Hood
diff --git a/py/ci/Dockerfile.ci b/py/ci/Dockerfile.ci
index 288c61029d..674fecf8d6 100644
--- a/py/ci/Dockerfile.ci
+++ b/py/ci/Dockerfile.ci
@@ -10,7 +10,6 @@ RUN wget -q https://developer.nvidia.com/downloads/compute/machine-learning/tens
ENV TENSORRT_DIR=/TensorRT-10.7.0.23
ENV TENSORRT_VERSION=10.7.0
-ENV USE_CXX11_ABI=1
RUN wget https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 \
&& mv bazelisk-linux-amd64 /usr/bin/bazel \
diff --git a/setup.py b/setup.py
index 5b46ccb585..2905e161d6 100644
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@ def load_dep_info():
dir_path = os.path.join(str(get_root_dir()), "py")
-CXX11_ABI = IS_WINDOWS
+PRE_CXX11_ABI = False
JETPACK_VERSION = None
PY_ONLY = False
NO_TS = False
@@ -136,13 +136,13 @@ def load_dep_info():
if ci_env_var == "1":
CI_BUILD = True
-if "--use-cxx11-abi" in sys.argv:
- sys.argv.remove("--use-cxx11-abi")
- CXX11_ABI = True
+if "--use-pre-cxx11-abi" in sys.argv:
+ sys.argv.remove("--use-pre-cxx11-abi")
+ PRE_CXX11_ABI = True
-if (cxx11_abi_env_var := os.environ.get("USE_CXX11_ABI")) is not None:
- if cxx11_abi_env_var == "1":
- CXX11_ABI = True
+if (pre_cxx11_abi_env_var := os.environ.get("USE_PRE_CXX11_ABI")) is not None:
+ if pre_cxx11_abi_env_var == "1":
+ PRE_CXX11_ABI = True
if platform.uname().processor == "aarch64":
if "--jetpack-version" in sys.argv:
@@ -165,9 +165,9 @@ def load_dep_info():
)
JETPACK_VERSION = "6.1"
- if not CXX11_ABI:
+ if PRE_CXX11_ABI:
warnings.warn(
- "Jetson platform detected but did not see --use-cxx11-abi option, if using a pytorch distribution provided by NVIDIA include this flag"
+                "Jetson platform detected. NVIDIA-provided PyTorch distributions for Jetson use the CXX11 ABI; remove the --use-pre-cxx11-abi flag."
)
@@ -182,7 +182,7 @@ def load_dep_info():
def build_libtorchtrt_pre_cxx11_abi(
- develop=True, use_dist_dir=True, cxx11_abi=False, rt_only=False
+ develop=True, use_dist_dir=True, pre_cxx11_abi=True, rt_only=False
):
cmd = [BAZEL_EXE, "build"]
if rt_only:
@@ -196,7 +196,7 @@ def build_libtorchtrt_pre_cxx11_abi(
cmd.append("--compilation_mode=opt")
if use_dist_dir:
cmd.append("--distdir=third_party/dist_dir/x86_64-linux-gnu")
- if not cxx11_abi:
+ if pre_cxx11_abi:
cmd.append("--config=python")
else:
print("using CXX11 ABI build")
@@ -292,9 +292,9 @@ def finalize_options(self):
def run(self):
if not PY_ONLY:
- global CXX11_ABI
+ global PRE_CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(
- develop=True, cxx11_abi=CXX11_ABI, rt_only=NO_TS
+ develop=True, pre_cxx11_abi=PRE_CXX11_ABI, rt_only=NO_TS
)
copy_libtorchtrt(rt_only=NO_TS)
@@ -316,9 +316,9 @@ def finalize_options(self):
def run(self):
if not PY_ONLY:
- global CXX11_ABI
+ global PRE_CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(
- develop=False, cxx11_abi=CXX11_ABI, rt_only=NO_TS
+ develop=False, pre_cxx11_abi=PRE_CXX11_ABI, rt_only=NO_TS
)
copy_libtorchtrt(rt_only=NO_TS)
@@ -339,9 +339,9 @@ def finalize_options(self):
def run(self):
if not PY_ONLY:
- global CXX11_ABI
+ global PRE_CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(
- develop=False, cxx11_abi=CXX11_ABI, rt_only=NO_TS
+ develop=False, pre_cxx11_abi=PRE_CXX11_ABI, rt_only=NO_TS
)
copy_libtorchtrt(rt_only=NO_TS)
@@ -365,9 +365,9 @@ def run(self):
gen_version_file()
editable_wheel.run(self)
else:
- global CXX11_ABI
+ global PRE_CXX11_ABI
build_libtorchtrt_pre_cxx11_abi(
- develop=True, cxx11_abi=CXX11_ABI, rt_only=NO_TS
+ develop=True, pre_cxx11_abi=PRE_CXX11_ABI, rt_only=NO_TS
)
gen_version_file()
copy_libtorchtrt(rt_only=NO_TS)
@@ -561,9 +561,9 @@ def run(self):
"-Wno-deprecated-declarations",
]
+ (
- ["-D_GLIBCXX_USE_CXX11_ABI=1"]
- if CXX11_ABI
- else ["-D_GLIBCXX_USE_CXX11_ABI=0"]
+ ["-D_GLIBCXX_USE_CXX11_ABI=0"]
+ if PRE_CXX11_ABI
+ else ["-D_GLIBCXX_USE_CXX11_ABI=1"]
)
),
extra_link_args=(
@@ -584,9 +584,9 @@ def run(self):
"-export-dynamic",
]
+ (
- ["-D_GLIBCXX_USE_CXX11_ABI=1"]
- if CXX11_ABI
- else ["-D_GLIBCXX_USE_CXX11_ABI=0"]
+ ["-D_GLIBCXX_USE_CXX11_ABI=0"]
+ if PRE_CXX11_ABI
+ else ["-D_GLIBCXX_USE_CXX11_ABI=1"]
)
),
undef_macros=["NDEBUG"],