diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 6732ab7256..516e2d4743 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -16,7 +16,8 @@ jobs:
           - "PT110+CUDA113"
           - "PT113+CUDA118"
           - "PT210+CUDA121"
-          - "PTLATEST+CUDA124"
+          - "PT240+CUDA126"
+          - "PTLATEST+CUDA126"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
           - environment: PT110+CUDA113
@@ -28,9 +29,12 @@ jobs:
           - environment: PT210+CUDA121
             pytorch: "pytorch==2.1.0 torchvision==0.16.0 --extra-index-url https://download.pytorch.org/whl/cu121"
             base: "nvcr.io/nvidia/pytorch:23.08-py3"  # CUDA 12.1
-          - environment: PTLATEST+CUDA124
+          - environment: PT240+CUDA126
+            pytorch: "pytorch==2.4.0 torchvision==0.19.0 --extra-index-url https://download.pytorch.org/whl/cu121"
+            base: "nvcr.io/nvidia/pytorch:24.08-py3"  # CUDA 12.6
+          - environment: PTLATEST+CUDA126
             pytorch: "-U torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121"
-            base: "nvcr.io/nvidia/pytorch:24.08-py3"  # CUDA 12.4
+            base: "nvcr.io/nvidia/pytorch:24.10-py3"  # CUDA 12.6
     container:
       image: ${{ matrix.base }}
       options: "--gpus all"
@@ -80,7 +84,7 @@ jobs:
     if: github.repository == 'Project-MONAI/MONAI'
     strategy:
       matrix:
-        container: ["pytorch:23.08", "pytorch:24.08"]
+        container: ["pytorch:23.08", "pytorch:24.08", "pytorch:24.10"]
     container:
       image: nvcr.io/nvidia/${{ matrix.container }}-py3  # testing with the latest pytorch base image
       options: "--gpus all"
@@ -129,7 +133,7 @@ jobs:
     if: github.repository == 'Project-MONAI/MONAI'
     strategy:
       matrix:
-        container: ["pytorch:24.08"]
+        container: ["pytorch:24.10"]
     container:
       image: nvcr.io/nvidia/${{ matrix.container }}-py3  # testing with the latest pytorch base image
       options: "--gpus all"
@@ -233,7 +237,7 @@ jobs:
     if: github.repository == 'Project-MONAI/MONAI'
     needs: cron-gpu  # so that monai itself is verified first
     container:
-      image: nvcr.io/nvidia/pytorch:24.08-py3  # testing with the latest pytorch base image
+      image: nvcr.io/nvidia/pytorch:24.10-py3  # testing with the latest pytorch base image
       options: "--gpus all --ipc=host"
     runs-on: [self-hosted, linux, x64, integration]
     steps:
diff --git a/Dockerfile b/Dockerfile
index e45932c6bb..5fcfcf274d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,7 +11,7 @@

 # To build with a different base image
 # please run `docker build` using the `--build-arg PYTORCH_IMAGE=...` flag.
-ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.08-py3
+ARG PYTORCH_IMAGE=nvcr.io/nvidia/pytorch:24.10-py3
 FROM ${PYTORCH_IMAGE}

 LABEL maintainer="monai.contact@gmail.com"
diff --git a/tests/test_trt_compile.py b/tests/test_trt_compile.py
index 5a56f0e4a2..6df5d520bd 100644
--- a/tests/test_trt_compile.py
+++ b/tests/test_trt_compile.py
@@ -46,7 +46,7 @@ def tearDown(self):
         if current_device != self.gpu_device:
             torch.cuda.set_device(self.gpu_device)

-    @SkipIfAtLeastPyTorchVersion((2, 5, 0))
+    @SkipIfAtLeastPyTorchVersion((2, 4, 1))
     def test_handler(self):
         from ignite.engine import Engine