
Commit

test
lanluo-nvidia committed Nov 4, 2024
1 parent 546a574 commit 41aec8b
Showing 8 changed files with 146 additions and 19 deletions.
10 changes: 10 additions & 0 deletions .github/scripts/generate-tensorrt-test-matrix.py
@@ -29,6 +29,11 @@
"strip_prefix": "TensorRT-10.5.0.18",
"sha256": "e6436f4164db4e44d727354dccf7d93755efb70d6fbfd6fa95bdfeb2e7331b24",
},
"10.6.0": {
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/zip/TensorRT-10.6.0.26.Windows.win10.cuda-12.6.zip",
"strip_prefix": "TensorRT-10.6.0.26",
"sha256": "6c6d92c108a1b3368423e8f69f08d31269830f1e4c9da43b37ba34a176797254",
},
},
"linux": {
"10.4.0": {
@@ -41,6 +46,11 @@
"strip_prefix": "TensorRT-10.5.0.18",
"sha256": "f404d379d639552a3e026cd5267213bd6df18a4eb899d6e47815bbdb34854958",
},
"10.6.0": {
"urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/tars/TensorRT-10.6.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
"strip_prefix": "TensorRT-10.6.0.26",
"sha256": "f404d379d639552a3e026cd5267213bd6df18a4eb899d6e47815bbdb34854958",
},
},
}

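The new 10.6.0 entries follow the same shape as the existing ones: for each OS, a TensorRT version maps to a download URL, an archive prefix, and a SHA-256 checksum. As a rough sketch of how such a table can be expanded into CI matrix entries (the dictionary fields mirror the diff above, but the helper itself is hypothetical and not taken from generate-tensorrt-test-matrix.py):

# Hypothetical illustration only; the committed script's actual logic may differ.
TENSORRT_VERSIONS = {
    "linux": {
        "10.6.0": {
            "urls": "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.6.0/tars/TensorRT-10.6.0.26.Linux.x86_64-gnu.cuda-12.6.tar.gz",
            "strip_prefix": "TensorRT-10.6.0.26",
            "sha256": "f404d379d639552a3e026cd5267213bd6df18a4eb899d6e47815bbdb34854958",
        },
    },
}

def matrix_entries(os_name: str) -> list[dict]:
    # Each entry carries the fields the build workflow later reads as
    # TENSORRT_VERSION, TENSORRT_URLS and TENSORRT_SHA256.
    return [{"version": version, **info} for version, info in TENSORRT_VERSIONS[os_name].items()]

print(matrix_entries("linux"))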
7 changes: 2 additions & 5 deletions .github/workflows/build-tensorrt-linux.yml
@@ -70,10 +70,6 @@ on:
required: false
type: boolean
default: true
upload-artifact:
description: 'Name to give artifacts uploaded from ${inputs.repository}/dist'
default: ''
type: string

permissions:
id-token: write
@@ -96,6 +92,7 @@ jobs:
TENSORRT_VERSION: ${{ matrix.tensorrt.version }}
TENSORRT_URLS: ${{ matrix.tensorrt.urls }}
TENSORRT_SHA256: ${{ matrix.tensorrt.sha256 }}
ARTIFACT_NAME: torch_tensorrt_${{ matrix.tensorrt.version }}_py${{ matrix.python_version }}_${{ matrix.desired_cuda }}
name: build_tensorrt${{ matrix.tensorrt.version }}_py${{matrix.python_version}}_${{matrix.desired_cuda}}
runs-on: ${{ matrix.validation_runner }}
container:
@@ -217,7 +214,7 @@ jobs:
continue-on-error: true
uses: actions/upload-artifact@v3
with:
name: ${{ inputs.upload-artifact }}
name: ${{ env.ARTIFACT_NAME }}
path: ${{ inputs.repository }}/dist

concurrency:
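With the upload-artifact input removed, each build job now derives the artifact name itself through the ARTIFACT_NAME environment variable and passes it to actions/upload-artifact. The naming scheme, written out as a small Python sketch (the sample values are illustrative only, not taken from the workflow):

# Sketch of the torch_tensorrt_<trt>_py<python>_<cuda> scheme set in ARTIFACT_NAME above.
def artifact_name(tensorrt_version: str, python_version: str, desired_cuda: str) -> str:
    return f"torch_tensorrt_{tensorrt_version}_py{python_version}_{desired_cuda}"

print(artifact_name("10.6.0", "3.11", "cu124"))  # torch_tensorrt_10.6.0_py3.11_cu124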
2 changes: 1 addition & 1 deletion .github/workflows/build-test-linux.yml
@@ -1,7 +1,7 @@
name: Build and test Linux wheels

on:
# pull_request:
pull_request:
push:
branches:
- main
1 change: 0 additions & 1 deletion .github/workflows/build-test-tensorrt-linux.yml
@@ -67,7 +67,6 @@ jobs:
package-name: ${{ matrix.package-name }}
smoke-test-script: ${{ matrix.smoke-test-script }}
trigger-event: ${{ github.event_name }}
upload-artifact: torch_tensorrt_${{ needs.generate-tensorrt-matrix.outputs.matrix.tensorrt.version }}_py${{ needs.generate-tensorrt-matrix.outputs.matrix.python_version }}_${{ needs.generate-tensorrt-matrix.outputs.matrix.desired_cuda }}

tests-py-torchscript-fe:
name: Test torchscript frontend [Python]
2 changes: 1 addition & 1 deletion .github/workflows/build-test-windows.yml
@@ -1,7 +1,7 @@
name: Build and test Windows wheels

on:
# pull_request:
pull_request:
push:
branches:
- main
16 changes: 11 additions & 5 deletions packaging/pre_build_script.sh
@@ -21,12 +21,14 @@ pip install --force-reinstall --pre ${TORCH_TORCHVISION} --index-url ${INDEX_URL
export TORCH_BUILD_NUMBER=$(python -c "import torch, urllib.parse as ul; print(ul.quote_plus(torch.__version__))")
export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))")

# replace current tensorrt version to the upgraded tensorrt version
current_version="10.3.0"
sed -i -e "s/tensorrt-cu12==${current_version}/tensorrt-cu12==${TENSORRT_VERSION}/g" \
if [[ ${TENSORRT_VERSION} != "" ]]; then
# TENSORRT_VERSION is the upgraded TensorRT version; replace the current TensorRT version with it in pyproject.toml
current_version=$(cat dev_dep_versions.yml | grep __tensorrt_version__ | sed 's/__tensorrt_version__: //g' | sed 's/"//g')
sed -i -e "s/tensorrt-cu12==${current_version}/tensorrt-cu12==${TENSORRT_VERSION}/g" \
-e "s/tensorrt-cu12-bindings==${current_version}/tensorrt-cu12-bindings==${TENSORRT_VERSION}/g" \
-e "s/tensorrt-cu12-libs==${current_version}/tensorrt-cu12-libs==${TENSORRT_VERSION}/g" \
pyproject.toml
pyproject.toml
fi

if [[ "${CU_VERSION::4}" < "cu12" ]]; then
# replace dependencies from tensorrt-cu12-bindings/libs to tensorrt-cu11-bindings/libs
@@ -36,7 +38,11 @@ if [[ "${CU_VERSION::4}" < "cu12" ]]; then
pyproject.toml
fi

cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
if [[ ${TENSORRT_VERSION} != "" ]]; then
cat toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl | envsubst > MODULE.bazel
else
cat toolchains/ci_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel
fi

cat MODULE.bazel
export CI_BUILD=1
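
In summary, the script now reads the baseline TensorRT version from dev_dep_versions.yml and, only when TENSORRT_VERSION is set, rewrites the tensorrt-cu12 pins in pyproject.toml and renders MODULE_tensorrt.bazel.tmpl instead of MODULE.bazel.tmpl. The pin rewrite can be read as roughly the following Python (a sketch for illustration; the committed script uses grep/sed):

import os
import re
from pathlib import Path

upgraded = os.environ.get("TENSORRT_VERSION", "")
if upgraded:
    # Baseline pin, e.g. a line such as __tensorrt_version__: "10.3.0" in dev_dep_versions.yml.
    current = re.search(r'__tensorrt_version__:\s*"?([\d.]+)"?',
                        Path("dev_dep_versions.yml").read_text()).group(1)
    pyproject = Path("pyproject.toml").read_text()
    for pkg in ("tensorrt-cu12", "tensorrt-cu12-bindings", "tensorrt-cu12-libs"):
        pyproject = pyproject.replace(f"{pkg}=={current}", f"{pkg}=={upgraded}")
    Path("pyproject.toml").write_text(pyproject)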
12 changes: 6 additions & 6 deletions toolchains/ci_workspaces/MODULE.bazel.tmpl
@@ -67,20 +67,20 @@ http_archive(
http_archive(
name = "tensorrt",
build_file = "@//third_party/tensorrt/archive:BUILD",
sha256 = "${TENSORRT_SHA256}",
strip_prefix = "${TENSORRT_STRIP_PREFIX}",
sha256 = "adff1cd5abe5d87013806172351e58fd024e5bf0fc61d49ef4b84cd38ed99081",
strip_prefix = "TensorRT-10.3.0.26",
urls = [
"${TENSORRT_URLS}",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.Linux.x86_64-gnu.cuda-12.5.tar.gz",
],
)

http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
sha256 = "${TENSORRT_SHA256}",
strip_prefix = "${TENSORRT_STRIP_PREFIX}",
sha256 = "2bb4bcb79e8c33575816d874b0512ea28c302af1c06ee6d224da71aa182f75e0",
strip_prefix = "TensorRT-10.3.0.26",
urls = [
"${TENSORRT_URLS}",
"https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/zip/TensorRT-10.3.0.26.Windows.win10.cuda-12.5.zip",
],
)

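The default template is now pinned to the TensorRT 10.3.0 archives with literal checksums, while TensorRT-upgrade builds render the new MODULE_tensorrt.bazel.tmpl (added below), whose ${TENSORRT_SHA256}, ${TENSORRT_STRIP_PREFIX}, and ${TENSORRT_URLS} placeholders are filled in by the envsubst call in pre_build_script.sh. A minimal Python stand-in for that rendering step, assuming the variables are exported in the environment:

import os
from pathlib import Path

# Rough equivalent of: cat toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl | envsubst > MODULE.bazel
# Note: os.path.expandvars leaves unset ${VARS} untouched, whereas envsubst replaces them with empty strings.
template = Path("toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl").read_text()
Path("MODULE.bazel").write_text(os.path.expandvars(template))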
115 changes: 115 additions & 0 deletions toolchains/ci_workspaces/MODULE_tensorrt.bazel.tmpl
@@ -0,0 +1,115 @@
module(
name = "torch_tensorrt",
repo_name = "org_pytorch_tensorrt",
version = "${BUILD_VERSION}"
)

bazel_dep(name = "googletest", version = "1.14.0")
bazel_dep(name = "platforms", version = "0.0.10")
bazel_dep(name = "rules_cc", version = "0.0.9")
bazel_dep(name = "rules_python", version = "0.34.0")

python = use_extension("@rules_python//python/extensions:python.bzl", "python")
python.toolchain(
ignore_root_user_error = True,
python_version = "3.11",
)

bazel_dep(name = "rules_pkg", version = "1.0.1")
git_override(
module_name = "rules_pkg",
commit = "17c57f4",
remote = "https://github.com/narendasan/rules_pkg",
)

local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "local_repository")

# External dependency for torch_tensorrt if you already have precompiled binaries.
local_repository(
name = "torch_tensorrt",
path = "/opt/conda/lib/python3.8/site-packages/torch_tensorrt",
)


new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")

# CUDA should be installed on the system locally
new_local_repository(
name = "cuda",
build_file = "@//third_party/cuda:BUILD",
path = "${CUDA_HOME}",
)

new_local_repository(
name = "cuda_win",
build_file = "@//third_party/cuda:BUILD",
path = "${CUDA_HOME}",
)


http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

#############################################################################################################
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
#############################################################################################################

http_archive(
name = "libtorch",
build_file = "@//third_party/libtorch:BUILD",
strip_prefix = "libtorch",
urls = ["https://download.pytorch.org/libtorch/${CHANNEL}/${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-latest.zip"],
)

# Download these tarballs manually from the NVIDIA website
# Either place them in the distdir directory in third_party and use the --distdir flag
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz

http_archive(
name = "tensorrt",
build_file = "@//third_party/tensorrt/archive:BUILD",
sha256 = "${TENSORRT_SHA256}",
strip_prefix = "${TENSORRT_STRIP_PREFIX}",
urls = [
"${TENSORRT_URLS}",
],
)

http_archive(
name = "tensorrt_win",
build_file = "@//third_party/tensorrt/archive:BUILD",
sha256 = "${TENSORRT_SHA256}",
strip_prefix = "${TENSORRT_STRIP_PREFIX}",
urls = [
"${TENSORRT_URLS}",
],
)


####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################

# NOTE: In the case you are using just the pre-cxx11-abi path or just the cxx11 abi path
# with your local libtorch, just point deps at the same path to satisfy bazel.

# NOTE: NVIDIA's aarch64 PyTorch (python) wheel file uses the CXX11 ABI unlike PyTorch's standard
# x86_64 python distribution. If using NVIDIA's version just point to the root of the package
# for both versions here and do not use --config=pre-cxx11-abi

new_local_repository(
name = "libtorch_win",
path = "${TORCH_INSTALL_PATH}",
build_file = "third_party/libtorch/BUILD"
)

new_local_repository(
name = "libtorch_pre_cxx11_abi",
path = "${TORCH_INSTALL_PATH}",
build_file = "third_party/libtorch/BUILD"
)

#new_local_repository(
# name = "tensorrt",
# path = "/usr/",
# build_file = "@//third_party/tensorrt/local:BUILD"
#)
