diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index e13848f8fc..2e7921ec94 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -13,16 +13,12 @@ jobs:
     strategy:
       matrix:
         environment:
-          - "PT110+CUDA113"
           - "PT113+CUDA118"
           - "PT210+CUDA121"
           - "PT240+CUDA126"
           - "PTLATEST+CUDA126"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
-          - environment: PT110+CUDA113
-            pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu113"
-            base: "nvcr.io/nvidia/pytorch:21.06-py3"  # CUDA 11.3
           - environment: PT113+CUDA118
             pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu121"
             base: "nvcr.io/nvidia/pytorch:22.10-py3"  # CUDA 11.8
diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml
index d8623c8087..cd916f2ebb 100644
--- a/.github/workflows/pythonapp-gpu.yml
+++ b/.github/workflows/pythonapp-gpu.yml
@@ -22,20 +22,10 @@ jobs:
     strategy:
       matrix:
         environment:
-          - "PT19+CUDA114DOCKER"
-          - "PT110+CUDA111"
-          - "PT112+CUDA118DOCKER"
           - "PT113+CUDA116"
           - "PT210+CUDA121DOCKER"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
-          - environment: PT110+CUDA111
-            pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu111"
-            base: "nvcr.io/nvidia/cuda:11.1.1-devel-ubuntu18.04"
-          - environment: PT112+CUDA118DOCKER
-            # 22.09: 1.13.0a0+d0d6b1f
-            pytorch: "-h"  # we explicitly set pytorch to -h to avoid pip install error
-            base: "nvcr.io/nvidia/pytorch:22.09-py3"
           - environment: PT113+CUDA116
             pytorch: "torch==1.13.1 torchvision==0.14.1"
             base: "nvcr.io/nvidia/cuda:11.6.1-devel-ubuntu18.04"
@@ -59,8 +49,7 @@ jobs:
         apt-get update
         apt-get install -y wget
 
-        if [ ${{ matrix.environment }} = "PT110+CUDA111" ] || \
-           [ ${{ matrix.environment }} = "PT113+CUDA116" ]
+        if [ ${{ matrix.environment }} = "PT113+CUDA116" ]
         then
         PYVER=3.9 PYSFX=3 DISTUTILS=python3-distutils && \
         apt-get update && apt-get install -y --no-install-recommends \
@@ -94,9 +83,6 @@ jobs:
             python get-pip.py && \
             rm get-pip.py;
           fi
-      - if: matrix.environment == 'PT19+CUDA114DOCKER'
-        name: Optional Cupy dependency (cuda114)
-        run: echo "cupy-cuda114" >> requirements-dev.txt
       - name: Install dependencies
         if: github.event.pull_request.merged != true
         run: |
diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml
index b0d37937e9..c94a94ed89 100644
--- a/.github/workflows/pythonapp-min.yml
+++ b/.github/workflows/pythonapp-min.yml
@@ -124,7 +124,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        pytorch-version: ['1.10.2', '1.11.0', '1.12.1', '1.13', '2.0.1', 'latest']
+        pytorch-version: ['1.13.1', '2.0.1', 'latest']
     timeout-minutes: 40
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 3c39166c1e..f175cc3f7c 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -155,7 +155,7 @@ jobs:
           # install the latest pytorch for testing
           # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated
           # fresh torch installation according to pyproject.toml
-          python -m pip install torch>=1.9 torchvision
+          python -m pip install torch>=1.13.1 torchvision
       - name: Check packages
         run: |
           pip uninstall monai
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 7307d8e5f9..d657580743 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,5 @@
--f https://download.pytorch.org/whl/cpu/torch-1.12.1%2Bcpu-cp37-cp37m-linux_x86_64.whl
-torch>=1.9
+-f https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl
+torch>=1.13.1
 pytorch-ignite==0.4.11
 numpy>=1.20
 itk>=5.2
diff --git a/environment-dev.yml b/environment-dev.yml
index 4a1723e8a5..8617a3b9cb 100644
--- a/environment-dev.yml
+++ b/environment-dev.yml
@@ -6,7 +6,7 @@ channels:
   - conda-forge
 dependencies:
   - numpy>=1.24,<2.0
-  - pytorch>=1.9
+  - pytorch>=1.13.1
   - torchio
   - torchvision
   - pytorch-cuda>=11.6
diff --git a/monai/apps/auto3dseg/transforms.py b/monai/apps/auto3dseg/transforms.py
index bb755aa78c..28f82e3725 100644
--- a/monai/apps/auto3dseg/transforms.py
+++ b/monai/apps/auto3dseg/transforms.py
@@ -18,7 +18,6 @@
 import torch
 
 from monai.config import KeysCollection
-from monai.networks.utils import pytorch_after
 from monai.transforms import MapTransform
 from monai.utils.misc import ImageMetaKey
 
@@ -76,7 +75,7 @@ def __call__(self, data: Mapping[Hashable, torch.Tensor]) -> dict[Hashable, torc
                 d[key] = torch.nn.functional.interpolate(
                     input=d[key].unsqueeze(0),
                     size=image_shape,
-                    mode="nearest-exact" if pytorch_after(1, 11) else "nearest",
+                    mode="nearest-exact",
                 ).squeeze(0)
             else:
                 raise ValueError(
diff --git a/monai/data/utils.py b/monai/data/utils.py
index f35c5124d8..d03dbd3234 100644
--- a/monai/data/utils.py
+++ b/monai/data/utils.py
@@ -50,7 +50,6 @@
     issequenceiterable,
     look_up_option,
     optional_import,
-    pytorch_after,
 )
 
 pd, _ = optional_import("pandas")
@@ -450,12 +449,9 @@ def collate_meta_tensor_fn(batch, *, collate_fn_map=None):
     Collate a sequence of meta tensor into a single batched metatensor. This is called by `collage_meta_tensor`
     and so should not be used as a collate function directly in dataloaders.
     """
-    if pytorch_after(1, 13):
-        from torch.utils.data._utils.collate import collate_tensor_fn  # imported here for pylint/mypy issues
+    from torch.utils.data._utils.collate import collate_tensor_fn  # imported here for pylint/mypy issues
 
-        collated = collate_tensor_fn(batch)
-    else:
-        collated = default_collate(batch)
+    collated = collate_tensor_fn(batch)
 
     meta_dicts = [i.meta or TraceKeys.NONE for i in batch]
     common_ = set.intersection(*[set(d.keys()) for d in meta_dicts if isinstance(d, dict)])
@@ -494,18 +490,15 @@ def list_data_collate(batch: Sequence):
     Need to use this collate if apply some transforms that can generate batch data.
 
""" + from torch.utils.data._utils.collate import default_collate_fn_map - if pytorch_after(1, 13): - # needs to go here to avoid circular import - from torch.utils.data._utils.collate import default_collate_fn_map - - from monai.data.meta_tensor import MetaTensor + from monai.data.meta_tensor import MetaTensor - default_collate_fn_map.update({MetaTensor: collate_meta_tensor_fn}) + default_collate_fn_map.update({MetaTensor: collate_meta_tensor_fn}) elem = batch[0] data = [i for k in batch for i in k] if isinstance(elem, list) else batch key = None - collate_fn = default_collate if pytorch_after(1, 13) else collate_meta_tensor + collate_fn = default_collate try: if config.USE_META_DICT: data = pickle_operations(data) # bc 0.9.0 diff --git a/monai/inferers/utils.py b/monai/inferers/utils.py index edaf736091..8adba8fa25 100644 --- a/monai/inferers/utils.py +++ b/monai/inferers/utils.py @@ -31,11 +31,10 @@ fall_back_tuple, look_up_option, optional_import, - pytorch_after, ) tqdm, _ = optional_import("tqdm", name="tqdm") -_nearest_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest" +_nearest_mode = "nearest-exact" __all__ = ["sliding_window_inference"] diff --git a/monai/losses/dice.py b/monai/losses/dice.py index 4108820bec..ee45b5c3b2 100644 --- a/monai/losses/dice.py +++ b/monai/losses/dice.py @@ -25,7 +25,7 @@ from monai.losses.spatial_mask import MaskedLoss from monai.losses.utils import compute_tp_fp_fn from monai.networks import one_hot -from monai.utils import DiceCEReduction, LossReduction, Weight, look_up_option, pytorch_after +from monai.utils import DiceCEReduction, LossReduction, Weight, look_up_option class DiceLoss(_Loss): @@ -738,12 +738,9 @@ def __init__( batch=batch, weight=dice_weight, ) - if pytorch_after(1, 10): - self.cross_entropy = nn.CrossEntropyLoss( - weight=weight, reduction=reduction, label_smoothing=label_smoothing - ) - else: - self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction) + self.cross_entropy = nn.CrossEntropyLoss( + weight=weight, reduction=reduction, label_smoothing=label_smoothing + ) self.binary_cross_entropy = nn.BCEWithLogitsLoss(pos_weight=weight, reduction=reduction) if lambda_dice < 0.0: raise ValueError("lambda_dice should be no less than 0.0.") @@ -751,7 +748,6 @@ def __init__( raise ValueError("lambda_ce should be no less than 0.0.") self.lambda_dice = lambda_dice self.lambda_ce = lambda_ce - self.old_pt_ver = not pytorch_after(1, 10) def ce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: """ @@ -764,12 +760,6 @@ def ce(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: if n_pred_ch != n_target_ch and n_target_ch == 1: target = torch.squeeze(target, dim=1) target = target.long() - elif self.old_pt_ver: - warnings.warn( - f"Multichannel targets are not supported in this older Pytorch version {torch.__version__}. " - "Using argmax (as a workaround) to convert target to a single channel." 
-            )
-            target = torch.argmax(target, dim=1)
         elif not torch.is_floating_point(target):
             target = target.to(dtype=input.dtype)
 
diff --git a/monai/losses/ds_loss.py b/monai/losses/ds_loss.py
index aacc16874d..6a604aa22d 100644
--- a/monai/losses/ds_loss.py
+++ b/monai/losses/ds_loss.py
@@ -17,8 +17,6 @@ import torch.nn.functional as F
 from torch.nn.modules.loss import _Loss
 
-from monai.utils import pytorch_after
-
 
 class DeepSupervisionLoss(_Loss):
     """
@@ -42,7 +40,7 @@ def __init__(self, loss: _Loss, weight_mode: str = "exp", weights: list[float] |
         self.loss = loss
         self.weight_mode = weight_mode
         self.weights = weights
-        self.interp_mode = "nearest-exact" if pytorch_after(1, 11) else "nearest"
+        self.interp_mode = "nearest-exact"
 
     def get_weights(self, levels: int = 1) -> list[float]:
         """
diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py
index 4acd4a3622..6d34c3fa77 100644
--- a/monai/networks/layers/simplelayers.py
+++ b/monai/networks/layers/simplelayers.py
@@ -31,7 +31,6 @@
     issequenceiterable,
     look_up_option,
     optional_import,
-    pytorch_after,
 )
 
 _C, _ = optional_import("monai._C")
@@ -293,14 +292,7 @@ def apply_filter(x: torch.Tensor, kernel: torch.Tensor, **kwargs) -> torch.Tenso
     x = x.view(1, kernel.shape[0], *spatials)
     conv = [F.conv1d, F.conv2d, F.conv3d][n_spatial - 1]
     if "padding" not in kwargs:
-        if pytorch_after(1, 10):
-            kwargs["padding"] = "same"
-        else:
-            # even-sized kernels are not supported
-            kwargs["padding"] = [(k - 1) // 2 for k in kernel.shape[2:]]
-    elif kwargs["padding"] == "same" and not pytorch_after(1, 10):
-        # even-sized kernels are not supported
-        kwargs["padding"] = [(k - 1) // 2 for k in kernel.shape[2:]]
+        kwargs["padding"] = "same"
 
     if "stride" not in kwargs:
         kwargs["stride"] = 1
@@ -372,11 +364,7 @@ def _make_coeffs(window_length, order):
     a = idx ** torch.arange(order + 1, dtype=torch.float, device="cpu").reshape(-1, 1)
     y = torch.zeros(order + 1, dtype=torch.float, device="cpu")
     y[0] = 1.0
-    return (
-        torch.lstsq(y, a).solution.squeeze()  # type: ignore
-        if not pytorch_after(1, 11)
-        else torch.linalg.lstsq(a, y).solution.squeeze()
-    )
+    return torch.linalg.lstsq(a, y).solution.squeeze()
 
 
 class HilbertTransform(nn.Module):
diff --git a/monai/networks/utils.py b/monai/networks/utils.py
index 1b4cb220ae..2279bed0b4 100644
--- a/monai/networks/utils.py
+++ b/monai/networks/utils.py
@@ -31,7 +31,7 @@
 from monai.apps.utils import get_logger
 from monai.config import PathLike
 from monai.utils.misc import ensure_tuple, save_obj, set_determinism
-from monai.utils.module import look_up_option, optional_import, pytorch_after
+from monai.utils.module import look_up_option, optional_import
 from monai.utils.type_conversion import convert_to_dst_type, convert_to_tensor
 
 onnx, _ = optional_import("onnx")
@@ -676,15 +676,6 @@ def convert_to_onnx(
         torch_versioned_kwargs["verify"] = verify
         verify = False
     else:
-        if not pytorch_after(1, 10):
-            if "example_outputs" not in kwargs:
-                # https://github.com/pytorch/pytorch/blob/release/1.9/torch/onnx/__init__.py#L182
-                raise TypeError(
-                    "example_outputs is required in scripting mode before PyTorch 1.10."
-                    "Please provide example outputs or use trace mode to export onnx model."
-                )
-            torch_versioned_kwargs["example_outputs"] = kwargs["example_outputs"]
-            del kwargs["example_outputs"]
         mode_to_export = torch.jit.script(model, **kwargs)
 
     if torch.is_tensor(inputs) or isinstance(inputs, dict):
@@ -746,8 +737,7 @@ def convert_to_onnx(
     # compare onnx/ort and PyTorch results
     for r1, r2 in zip(torch_out, onnx_out):
         if isinstance(r1, torch.Tensor):
-            assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose
-            assert_fn(r1.cpu(), convert_to_tensor(r2, dtype=r1.dtype), rtol=rtol, atol=atol)  # type: ignore
+            torch.testing.assert_close(r1.cpu(), convert_to_tensor(r2, dtype=r1.dtype), rtol=rtol, atol=atol)  # type: ignore
 
     return onnx_model
 
@@ -817,8 +807,7 @@ def convert_to_torchscript(
     # compare TorchScript and PyTorch results
     for r1, r2 in zip(torch_out, torchscript_out):
         if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):
-            assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose
-            assert_fn(r1, r2, rtol=rtol, atol=atol)  # type: ignore
+            torch.testing.assert_close(r1, r2, rtol=rtol, atol=atol)  # type: ignore
 
     return script_module
 
@@ -1031,8 +1020,7 @@ def convert_to_trt(
     # compare TorchScript and PyTorch results
     for r1, r2 in zip(torch_out, trt_out):
         if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):
-            assert_fn = torch.testing.assert_close if pytorch_after(1, 11) else torch.testing.assert_allclose
-            assert_fn(r1, r2, rtol=rtol, atol=atol)  # type: ignore
+            torch.testing.assert_close(r1, r2, rtol=rtol, atol=atol)  # type: ignore
 
     return trt_model
 
diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py
index 813f8c1d44..d5ca876e98 100644
--- a/monai/transforms/croppad/array.py
+++ b/monai/transforms/croppad/array.py
@@ -56,7 +56,6 @@
     ensure_tuple_rep,
     fall_back_tuple,
     look_up_option,
-    pytorch_after,
 )
 
 __all__ = [
@@ -392,11 +391,7 @@ def compute_slices(
         roi_center_t = convert_to_tensor(data=roi_center, dtype=torch.int16, wrap_sequence=True, device="cpu")
         roi_size_t = convert_to_tensor(data=roi_size, dtype=torch.int16, wrap_sequence=True, device="cpu")
         _zeros = torch.zeros_like(roi_center_t)
-        half = (
-            torch.divide(roi_size_t, 2, rounding_mode="floor")
-            if pytorch_after(1, 8)
-            else torch.floor_divide(roi_size_t, 2)
-        )
+        half = torch.divide(roi_size_t, 2, rounding_mode="floor")
         roi_start_t = torch.maximum(roi_center_t - half, _zeros)
         roi_end_t = torch.maximum(roi_start_t + roi_size_t, roi_start_t)
     else:
diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py
index e7e1616e13..1ff0abc27c 100644
--- a/monai/transforms/utils.py
+++ b/monai/transforms/utils.py
@@ -68,7 +68,6 @@
     look_up_option,
     min_version,
     optional_import,
-    pytorch_after,
     unsqueeze_left,
     unsqueeze_right,
 )
@@ -2255,7 +2254,7 @@ def _to_torch_resample_interp_mode(interp_mode):
     if ret is not None:
         return ret
     _mapping = {
-        SplineMode.ZERO: InterpolateMode.NEAREST_EXACT if pytorch_after(1, 11) else InterpolateMode.NEAREST,
+        SplineMode.ZERO: InterpolateMode.NEAREST_EXACT,
         SplineMode.ONE: InterpolateMode.LINEAR,
         SplineMode.THREE: InterpolateMode.BICUBIC,
     }
diff --git a/monai/utils/tf32.py b/monai/utils/tf32.py
index cfb023bdeb..81f56477bb 100644
--- a/monai/utils/tf32.py
+++ b/monai/utils/tf32.py
@@ -60,16 +60,6 @@ def detect_default_tf32() -> bool:
         if not has_ampere_or_later():
             return False
 
-        from monai.utils.module import pytorch_after
-
-        if pytorch_after(1, 7, 0) and not pytorch_after(1, 12, 0):
-            warnings.warn(
-                "torch.backends.cuda.matmul.allow_tf32 = True by default.\n"
-                "  This value defaults to True when PyTorch version in [1.7, 1.11] and may affect precision.\n"
-                "  See https://docs.monai.io/en/latest/precision_accelerating.html#precision-and-accelerating"
-            )
-            may_enable_tf32 = True
-
         override_tf32_env_vars = {"NVIDIA_TF32_OVERRIDE": "1"}  # TORCH_ALLOW_TF32_CUBLAS_OVERRIDE not checked #6907
         for name, override_val in override_tf32_env_vars.items():
             if os.environ.get(name) == override_val:
diff --git a/monai/visualize/class_activation_maps.py b/monai/visualize/class_activation_maps.py
index 489a563818..39f26d0fbd 100644
--- a/monai/visualize/class_activation_maps.py
+++ b/monai/visualize/class_activation_maps.py
@@ -22,7 +22,7 @@
 
 from monai.config import NdarrayTensor
 from monai.transforms import ScaleIntensity
-from monai.utils import ensure_tuple, pytorch_after
+from monai.utils import ensure_tuple
 from monai.visualize.visualizer import default_upsampler
 
 __all__ = ["CAM", "GradCAM", "GradCAMpp", "ModelWithHooks", "default_normalizer"]
@@ -83,13 +83,10 @@ def __init__(
                 continue
             _registered.append(name)
             if self.register_backward:
-                if pytorch_after(1, 8):
-                    if "inplace" in mod.__dict__ and mod.__dict__["inplace"]:
-                        # inplace=True causes errors for register_full_backward_hook
-                        mod.__dict__["inplace"] = False
-                    mod.register_full_backward_hook(self.backward_hook(name))
-                else:
-                    mod.register_backward_hook(self.backward_hook(name))
+                if "inplace" in mod.__dict__ and mod.__dict__["inplace"]:
+                    # inplace=True causes errors for register_full_backward_hook
+                    mod.__dict__["inplace"] = False
+                mod.register_full_backward_hook(self.backward_hook(name))
             if self.register_forward:
                 mod.register_forward_hook(self.forward_hook(name))
             if self.target_layers and (len(_registered) != len(self.target_layers)):
diff --git a/pyproject.toml b/pyproject.toml
index 9dc9cf619b..8ad55b1c2c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 requires = [
     "wheel",
     "setuptools",
-    "torch>=1.9",
+    "torch>=1.13.1",
     "ninja",
     "packaging"
 ]
diff --git a/requirements.txt b/requirements.txt
index e184322c13..46ff887c59 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
-torch>=1.9
+torch>=1.13.1
 numpy>=1.24,<2.0
diff --git a/setup.cfg b/setup.cfg
index 0c69051218..66d9e19609 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -42,7 +42,7 @@ setup_requires =
     ninja
     packaging
 install_requires =
-    torch>=1.9
+    torch>=1.13.1
     numpy>=1.24,<2.0
 
 [options.extras_require]
diff --git a/tests/test_cachedataset.py b/tests/test_cachedataset.py
index dbb1b8f8f1..0c0a7ef286 100644
--- a/tests/test_cachedataset.py
+++ b/tests/test_cachedataset.py
@@ -22,7 +22,6 @@
 
 from monai.data import CacheDataset, DataLoader, PersistentDataset, SmartCacheDataset
 from monai.transforms import Compose, Lambda, LoadImaged, RandLambda, ThreadUnsafe, Transform
-from monai.utils.module import pytorch_after
 
 TEST_CASE_1 = [Compose([LoadImaged(keys=["image", "label", "extra"])]), (128, 128, 128)]
 
@@ -130,7 +129,7 @@ class TestCacheThread(unittest.TestCase):
     @parameterized.expand(TEST_DS)
     def test_thread_safe(self, persistent_workers, cache_workers, loader_workers):
         expected = [102, 202, 302, 402, 502, 602, 702, 802, 902, 1002]
-        _kwg = {"persistent_workers": persistent_workers} if pytorch_after(1, 8) else {}
+        _kwg = {"persistent_workers": persistent_workers}
         data_list = list(range(1, 11))
         dataset = CacheDataset(
             data=data_list, transform=_StatefulTransform(), cache_rate=1.0, num_workers=cache_workers, progress=False
diff --git a/tests/test_convert_to_onnx.py b/tests/test_convert_to_onnx.py
index 798c510800..e08737926b 100644
--- a/tests/test_convert_to_onnx.py
+++ b/tests/test_convert_to_onnx.py
@@ -20,7 +20,6 @@
 
 from monai.networks import convert_to_onnx
 from monai.networks.nets import SegResNet, UNet
-from monai.utils.module import pytorch_after
 from tests.utils import SkipIfBeforePyTorchVersion, SkipIfNoModule, optional_import, skip_if_quick
 
 if torch.cuda.is_available():
@@ -53,7 +52,7 @@ def test_unet(self, device, use_trace, use_ort):
         model = UNet(
             spatial_dims=2, in_channels=1, out_channels=3, channels=(16, 32, 64), strides=(2, 2), num_res_units=0
         )
-        if pytorch_after(1, 10) or use_trace:
+        if use_trace:
             onnx_model = convert_to_onnx(
                 model=model,
                 inputs=[torch.randn((16, 1, 32, 32), requires_grad=False)],
@@ -66,22 +65,6 @@ def test_unet(self, device, use_trace, use_ort):
                 rtol=rtol,
                 atol=atol,
             )
-        else:
-            # https://github.com/pytorch/pytorch/blob/release/1.9/torch/onnx/__init__.py#L182
-            # example_outputs is required in scripting mode before PyTorch 3.10
-            onnx_model = convert_to_onnx(
-                model=model,
-                inputs=[torch.randn((16, 1, 32, 32), requires_grad=False)],
-                input_names=["x"],
-                output_names=["y"],
-                example_outputs=[torch.randn((16, 3, 32, 32), requires_grad=False)],
-                verify=True,
-                device=device,
-                use_ort=use_ort,
-                use_trace=use_trace,
-                rtol=rtol,
-                atol=atol,
-            )
         self.assertTrue(isinstance(onnx_model, onnx.ModelProto))
 
     @parameterized.expand(TESTS_ORT)
diff --git a/tests/test_integration_sliding_window.py b/tests/test_integration_sliding_window.py
index 8b53e94941..2a1d52ebbc 100644
--- a/tests/test_integration_sliding_window.py
+++ b/tests/test_integration_sliding_window.py
@@ -26,7 +26,7 @@
 from monai.networks import eval_mode, predict_segmentation
 from monai.networks.nets import UNet
 from monai.transforms import EnsureChannelFirst, SaveImage
-from monai.utils import pytorch_after, set_determinism
+from monai.utils import set_determinism
 from tests.utils import DistTestCase, TimedCall, make_nifti_image, skip_if_quick
 
 
@@ -55,11 +55,8 @@ def _sliding_window_processor(_engine, batch):
         return predict_segmentation(seg_probs)
 
     def save_func(engine):
-        if pytorch_after(1, 9, 1):
-            for m in engine.state.output:
-                saver(m)
-        else:
-            saver(engine.state.output[0])
+        for m in engine.state.output:
+            saver(m)
 
     infer_engine = Engine(_sliding_window_processor)
     infer_engine.add_event_handler(Events.ITERATION_COMPLETED, save_func)
diff --git a/tests/test_integration_workflows.py b/tests/test_integration_workflows.py
index fafb66f675..a166790ece 100644
--- a/tests/test_integration_workflows.py
+++ b/tests/test_integration_workflows.py
@@ -54,7 +54,7 @@
 )
 from monai.utils import optional_import, set_determinism
 from tests.testing_data.integration_answers import test_integration_value
-from tests.utils import DistTestCase, TimedCall, assert_allclose, pytorch_after, skip_if_quick
+from tests.utils import DistTestCase, TimedCall, assert_allclose, skip_if_quick
 
 SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter")
 
@@ -149,7 +149,7 @@ def _forward_completed(self, engine):
         val_handlers=val_handlers,
         amp=bool(amp),
         to_kwargs={"memory_format": torch.preserve_format},
-        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {},
+        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32},
     )
 
     train_postprocessing = Compose(
@@ -205,7 +205,7 @@ def _model_completed(self, engine):
         amp=bool(amp),
         optim_set_to_none=True,
         to_kwargs={"memory_format": torch.preserve_format},
-        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {},
+        amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32},
     )
 
     trainer.run()
diff --git a/tests/test_meta_tensor.py b/tests/test_meta_tensor.py
index 60b6019703..ebeb7c1199 100644
--- a/tests/test_meta_tensor.py
+++ b/tests/test_meta_tensor.py
@@ -17,7 +17,6 @@
 import string
 import tempfile
 import unittest
-import warnings
 from copy import deepcopy
 from multiprocessing.reduction import ForkingPickler
 
@@ -33,7 +32,6 @@
 from monai.data.utils import decollate_batch, list_data_collate
 from monai.transforms import BorderPadd, Compose, DivisiblePadd, FromMetaTensord, ToMetaTensord
 from monai.utils.enums import PostFix
-from monai.utils.module import pytorch_after
 from tests.utils import TEST_DEVICES, SkipIfBeforePyTorchVersion, assert_allclose, skip_if_no_cuda
 
 DTYPES = [[torch.float32], [torch.float64], [torch.float16], [torch.int64], [torch.int32], [None]]
@@ -241,14 +239,6 @@ def test_torchscript(self, device):
             traced_fn = torch.jit.load(fname)
             out = traced_fn(im)
             self.assertIsInstance(out, torch.Tensor)
-            if not isinstance(out, MetaTensor) and not pytorch_after(1, 9, 1):
-                warnings.warn(
-                    "When calling `nn.Module(MetaTensor) on a module traced with "
-                    "`torch.jit.trace`, your version of pytorch returns a "
-                    "`torch.Tensor` instead of a `MetaTensor`. Consider upgrading "
-                    "your pytorch version if this is important to you."
-                )
-                im_conv = im_conv.as_tensor()
         self.check(out, im_conv, ids=False)
 
     def test_pickling(self):
@@ -257,9 +247,6 @@ def test_pickling(self):
             fname = os.path.join(tmp_dir, "im.pt")
             torch.save(m, fname)
             m2 = torch.load(fname)
-            if not isinstance(m2, MetaTensor) and not pytorch_after(1, 8, 1):
-                warnings.warn("Old version of pytorch. pickling converts `MetaTensor` to `torch.Tensor`.")
-                m = m.as_tensor()
         self.check(m2, m, ids=False)
 
     @skip_if_no_cuda
@@ -556,11 +543,10 @@ def test_array_function(self, device="cpu", dtype=float):
         )
         assert_allclose(np.argwhere(c == 1.0).astype(int).tolist(), [[0]])
         assert_allclose(np.concatenate([c, c]), np.asarray([1.0, 2.0, 3.0, 1.0, 2.0, 3.0]))
-        if pytorch_after(1, 8, 1):
-            assert_allclose(c > np.asarray([1.0, 1.0, 1.0]), np.asarray([False, True, True]))
-            assert_allclose(
-                c > torch.as_tensor([1.0, 1.0, 1.0], device=device), torch.as_tensor([False, True, True], device=device)
-            )
+        assert_allclose(c > np.asarray([1.0, 1.0, 1.0]), np.asarray([False, True, True]))
+        assert_allclose(
+            c > torch.as_tensor([1.0, 1.0, 1.0], device=device), torch.as_tensor([False, True, True], device=device)
+        )
 
     @parameterized.expand(TESTS)
     def test_numpy(self, device=None, dtype=None):
diff --git a/tests/test_resize.py b/tests/test_resize.py
index d4c57e2742..0b22c82b75 100644
--- a/tests/test_resize.py
+++ b/tests/test_resize.py
@@ -27,7 +27,6 @@
     SkipIfAtLeastPyTorchVersion,
     assert_allclose,
     is_tf32_env,
-    pytorch_after,
 )
 
 TEST_CASE_0 = [{"spatial_size": 15}, (6, 10, 15)]
@@ -71,7 +70,7 @@ def test_unchange(self):
             ((32, 32), "area", False),
             ((32, 32, 32), "trilinear", True),
             ((256, 256), "bilinear", False),
-            ((256, 256), "nearest-exact" if pytorch_after(1, 11) else "nearest", False),
+            ((256, 256), "nearest-exact", False),
             ((128, 128), "nearest", False),
             ((128, 64), "area", True),  # already in a good shape
         ]
diff --git a/tests/test_resize_with_pad_or_crop.py b/tests/test_resize_with_pad_or_crop.py
index daf257f89f..fe7d85e6b4 100644
--- a/tests/test_resize_with_pad_or_crop.py
+++ b/tests/test_resize_with_pad_or_crop.py
@@ -20,19 +20,19 @@
 from monai.data.meta_tensor import MetaTensor
 from monai.transforms import ResizeWithPadOrCrop
 from monai.transforms.lazy.functional import apply_pending
-from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after
+from tests.utils import TEST_NDARRAYS_ALL, assert_allclose
 
 TEST_CASES = [
     [{"spatial_size": [15, 8, 8], "mode": "constant"}, (3, 8, 8, 4), (3, 15, 8, 8), True],
     [{"spatial_size": [15, 4, -1], "mode": "constant"}, (3, 8, 8, 4), (3, 15, 4, 4), True],
     [
-        {"spatial_size": [15, 4, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"},
+        {"spatial_size": [15, 4, -1], "mode": "reflect"},
         (3, 8, 8, 4),
         (3, 15, 4, 4),
         True,
     ],
     [
-        {"spatial_size": [-1, -1, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"},
+        {"spatial_size": [-1, -1, -1], "mode": "reflect"},
         (3, 8, 8, 4),
         (3, 8, 8, 4),
         True,
diff --git a/tests/test_resize_with_pad_or_cropd.py b/tests/test_resize_with_pad_or_cropd.py
index 391e0feb22..914890779a 100644
--- a/tests/test_resize_with_pad_or_cropd.py
+++ b/tests/test_resize_with_pad_or_cropd.py
@@ -22,18 +22,18 @@
 from monai.transforms import ResizeWithPadOrCropd
 from monai.transforms.lazy.functional import apply_pending
 from tests.test_resize_with_pad_or_crop import TESTS_PENDING_MODE
-from tests.utils import TEST_NDARRAYS_ALL, assert_allclose, pytorch_after
+from tests.utils import TEST_NDARRAYS_ALL, assert_allclose
 
 TEST_CASES = [
     [{"keys": "img", "spatial_size": [15, 8, 8], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 8, 8)],
     [{"keys": "img", "spatial_size": [15, 4, -1], "mode": "constant"}, {"img": np.zeros((3, 8, 8, 4))}, (3, 15, 4, 4)],
     [
-        {"keys": "img", "spatial_size": [15, 4, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"},
+        {"keys": "img", "spatial_size": [15, 4, -1], "mode": "reflect"},
         {"img": np.zeros((3, 8, 8, 4))},
         (3, 15, 4, 4),
     ],
     [
-        {"keys": "img", "spatial_size": [-1, -1, -1], "mode": "reflect" if pytorch_after(1, 11) else "constant"},
+        {"keys": "img", "spatial_size": [-1, -1, -1], "mode": "reflect"},
         {"img": np.zeros((3, 8, 8, 4))},
         (3, 8, 8, 4),
     ],
diff --git a/tests/test_swin_unetr.py b/tests/test_swin_unetr.py
index 5b33475c7e..ddc422206a 100644
--- a/tests/test_swin_unetr.py
+++ b/tests/test_swin_unetr.py
@@ -24,21 +24,14 @@
 from monai.networks.nets.swin_unetr import PatchMerging, PatchMergingV2, SwinUNETR, filter_swinunetr
 from monai.networks.utils import copy_model_state
 from monai.utils import optional_import
-from tests.utils import (
-    assert_allclose,
-    pytorch_after,
-    skip_if_downloading_fails,
-    skip_if_no_cuda,
-    skip_if_quick,
-    testing_data_config,
-)
+from tests.utils import assert_allclose, skip_if_downloading_fails, skip_if_no_cuda, skip_if_quick, testing_data_config
 
 einops, has_einops = optional_import("einops")
 
 TEST_CASE_SWIN_UNETR = []
 case_idx = 0
 test_merging_mode = ["mergingv2", "merging", PatchMerging, PatchMergingV2]
-checkpoint_vals = [True, False] if pytorch_after(1, 11) else [False]
+checkpoint_vals = [True, False]
 for attn_drop_rate in [0.4]:
     for in_channels in [1]:
         for depth in [[2, 1, 1, 1], [1, 2, 1, 1]]:
diff --git a/tests/testing_data/integration_answers.py b/tests/testing_data/integration_answers.py
index e02b9ae995..1dbab8e544 100644
--- a/tests/testing_data/integration_answers.py
+++ b/tests/testing_data/integration_answers.py
@@ -70,366 +70,6 @@
             ],
         }
     },
-    {  # test answers for PyTorch 1.12.1
-        "integration_classification_2d": {
-            "losses": [0.776835828070428, 0.1615355300011149, 0.07492854832938523, 0.04591309238865877],
-            "best_metric": 0.9999184380485994,
-            "infer_prop": [1029, 896, 980, 1033, 961, 1046],
-        },
-        "integration_segmentation_3d": {
-            "losses": [
-                0.5428894340991974,
-                0.47331981360912323,
-                0.4482289582490921,
-                0.4452722787857056,
-                0.4289989799261093,
-                0.4359133839607239,
-            ],
-            "best_metric": 0.933259129524231,
-            "infer_metric": 0.9332860708236694,
-            "output_sums": [
-                0.142167581604417,
-                0.15195543400875847,
-                0.1512754523215521,
-                0.13962938779108452,
-                0.18835719348918614,
-                0.16943498693483486,
-                0.1465709827477569,
-                0.16806483607477135,
-                0.1568844609697224,
-                0.17911090857818554,
-                0.16252098157181355,
-                0.16806016936625395,
-                0.14430124467305516,
-                0.11316135548315168,
-                0.16183771025615476,
-                0.2009426314066978,
-                0.1760258010156966,
-                0.09700864497950844,
-                0.1938495370314683,
-                0.20319147575335647,
-                0.19629641404249798,
-                0.20852344793102826,
-                0.16185073630020633,
-                0.13184196857669161,
-                0.1480959525354053,
-                0.14232924377085415,
-                0.23177739882790951,
-                0.16094610375534632,
-                0.14832771888168225,
-                0.10259365443625812,
-                0.11850632233099603,
-                0.1294100326098242,
-                0.11364228279017609,
-                0.15181947897584674,
-                0.16319358155815072,
-                0.1940284526521386,
-                0.22306137879066443,
-                0.18083137638759522,
-                0.1903135237574692,
-                0.07402317520619131,
-            ],
-        },
-        "integration_workflows": {
-            "best_metric": 0.9219646483659745,
-            "infer_metric": 0.921751058101654,
-            "output_sums": [
-                0.14183664321899414,
-                0.1513957977294922,
-                0.13804054260253906,
-                0.13356828689575195,
-                0.18456125259399414,
-                0.16363763809204102,
-                0.14090299606323242,
-                0.16649389266967773,
-                0.15651893615722656,
-                0.17655134201049805,
-                0.16116666793823242,
-                0.1644763946533203,
-                0.14383649826049805,
-                0.11055326461791992,
-                0.16080379486083984,
-                0.19629907608032227,
-                0.17441415786743164,
-                0.053577423095703125,
-                0.19043779373168945,
-                0.19904804229736328,
-                0.19526052474975586,
-                0.20304107666015625,
-                0.16030025482177734,
-                0.13170623779296875,
-                0.15118932723999023,
-                0.13686418533325195,
-                0.22668886184692383,
-                0.1611471176147461,
-                0.1472463607788086,
-                0.10427379608154297,
-                0.11962461471557617,
-                0.1305704116821289,
-                0.11204910278320312,
-                0.15171337127685547,
-                0.15962505340576172,
-                0.18976259231567383,
-                0.21649456024169922,
-                0.17761802673339844,
-                0.18516874313354492,
-                0.03636503219604492,
-            ],
-            "best_metric_2": 0.9219559609889985,
-            "infer_metric_2": 0.9217371672391892,
-            "output_sums_2": [
-                0.14187288284301758,
-                0.15140819549560547,
-                0.13802719116210938,
-                0.1335887908935547,
-                0.18454980850219727,
-                0.1636652946472168,
-                0.14091157913208008,
-                0.16653108596801758,
-                0.15651702880859375,
-                0.17658615112304688,
-                0.1611957550048828,
-                0.16448307037353516,
-                0.14385128021240234,
-                0.1105203628540039,
-                0.16085100173950195,
-                0.19626951217651367,
-                0.17442035675048828,
-                0.053586483001708984,
-                0.19042730331420898,
-                0.1990523338317871,
-                0.1952815055847168,
-                0.20303773880004883,
-                0.16034317016601562,
-                0.13172531127929688,
-                0.15118741989135742,
-                0.1368694305419922,
-                0.22667837142944336,
-                0.16119050979614258,
-                0.14726591110229492,
-                0.10426473617553711,
-                0.11961841583251953,
-                0.13054800033569336,
-                0.11203193664550781,
-                0.15172529220581055,
-                0.15963029861450195,
-                0.18975019454956055,
-                0.21646499633789062,
-                0.17763566970825195,
-                0.18517112731933594,
-                0.03638744354248047,
-            ],
-        },
-    },
-    {  # test answers for cuda 10.x
-        "integration_classification_2d": {
-            "losses": [0.777176220515731, 0.16019743723664315, 0.07480076164197011, 0.045643698364780966],
-            "best_metric": 0.9999418774120775,
-            "infer_prop": [1030, 897, 980, 1033, 960, 1048],
-        },
-        "integration_segmentation_3d": {
-            "losses": [
-                0.5326887160539627,
-                0.4685510128736496,
-                0.46245276033878324,
-                0.4411882758140564,
-                0.4198471873998642,
-                0.43021280467510226,
-            ],
-            "best_metric": 0.931993305683136,
-            "infer_metric": 0.9326668977737427,
-            "output_sums": [
-                0.1418775228871769,
-                0.15188869120317386,
-                0.15140863737688195,
-                0.1396146850007127,
-                0.18784343811575696,
-                0.16909487431163164,
-                0.14649608249452073,
-                0.1677767130878611,
-                0.1568122289811143,
-                0.17874181729735056,
-                0.16213703658980205,
-                0.16754335171970686,
-                0.14444824920997243,
-                0.11432402622850306,
-                0.16143210936221247,
-                0.20055289634107482,
-                0.17543571757219317,
-                0.09920729163334538,
-                0.19297325815057875,
-                0.2023200127892273,
-                0.1956677579845722,
-                0.20774045016425718,
-                0.16193278944159428,
-                0.13174198906539808,
-                0.14830508550670007,
-                0.14241105864278342,
-                0.23090631643085724,
-                0.16056153813499532,
-                0.1480353269419819,
-                0.10318719171632634,
-                0.11867462580989198,
-                0.12997011485830187,
-                0.11401220332210203,
-                0.15242746700662088,
-                0.1628489107974574,
-                0.19327235354175412,
-                0.22184902863377548,
-                0.18028049625972334,
-                0.18958059106892552,
-                0.07884601267057013,
-            ],
-        },
-        "integration_workflows": {
-            "best_metric": 0.9217087924480438,
-            "infer_metric": 0.9214379042387009,
-            "output_sums": [
-                0.14209461212158203,
-                0.15126705169677734,
-                0.13800382614135742,
-                0.1338181495666504,
-                0.1850571632385254,
-                0.16372442245483398,
-                0.14059066772460938,
-                0.16674423217773438,
-                0.15653657913208008,
-                0.17690563201904297,
-                0.16154909133911133,
-                0.16521310806274414,
-                0.14388608932495117,
-                0.1103353500366211,
-                0.1609959602355957,
-                0.1967010498046875,
-                0.1746964454650879,
-                0.05329275131225586,
-                0.19098854064941406,
-                0.19976520538330078,
-                0.19576644897460938,
-                0.20346736907958984,
-                0.1601848602294922,
-                0.1316051483154297,
-                0.1511220932006836,
-                0.13670969009399414,
-                0.2276287078857422,
-                0.1611800193786621,
-                0.14751672744750977,
-                0.10413789749145508,
-                0.11944007873535156,
-                0.1305546760559082,
-                0.11204719543457031,
-                0.15145111083984375,
-                0.16007614135742188,
-                0.1904129981994629,
-                0.21741962432861328,
-                0.17812013626098633,
-                0.18587207794189453,
-                0.03605222702026367,
-            ],
-            "best_metric_2": 0.9210659921169281,
-            "infer_metric_2": 0.9208109736442566,
-            "output_sums_2": [
-                0.14227628707885742,
-                0.1515035629272461,
-                0.13819408416748047,
-                0.13402271270751953,
-                0.18525266647338867,
-                0.16388607025146484,
-                0.14076614379882812,
-                0.16694307327270508,
-                0.15677356719970703,
-                0.1771831512451172,
-                0.16172313690185547,
-                0.1653728485107422,
-                0.14413118362426758,
-                0.11057281494140625,
-                0.16121912002563477,
-                0.19680166244506836,
-                0.1748638153076172,
-                0.053426265716552734,
-                0.19117307662963867,
-                0.19996356964111328,
-                0.1959366798400879,
-                0.20363712310791016,
-                0.16037797927856445,
-                0.13180780410766602,
-                0.1513657569885254,
-                0.13686084747314453,
-                0.2277364730834961,
-                0.16137409210205078,
-                0.1476879119873047,
-                0.10438394546508789,
-                0.11967992782592773,
-                0.13080739974975586,
-                0.11226606369018555,
-                0.15168476104736328,
-                0.1602616310119629,
-                0.190582275390625,
-                0.21756458282470703,
-                0.17825984954833984,
-                0.18604803085327148,
-                0.036206722259521484,
-            ],
-        },
-    },
-    {  # test answers for PyTorch 1.9
-        "integration_workflows": {
-            "output_sums_2": [
-                0.14213180541992188,
-                0.15153264999389648,
-                0.13801145553588867,
-                0.1338348388671875,
-                0.18515968322753906,
-                0.16404008865356445,
-                0.14110612869262695,
-                0.16686391830444336,
-                0.15673542022705078,
-                0.1772594451904297,
-                0.16174745559692383,
-                0.16518878936767578,
-                0.1440296173095703,
-                0.11033201217651367,
-                0.1611781120300293,
-                0.19660568237304688,
-                0.17468547821044922,
-                0.053053855895996094,
-                0.1909656524658203,
-                0.19952869415283203,
-                0.1957845687866211,
-                0.2034916877746582,
-                0.16042661666870117,
-                0.13193607330322266,
-                0.15104389190673828,
-                0.13695430755615234,
-                0.22720861434936523,
-                0.16157913208007812,
-                0.14759159088134766,
-                0.10379791259765625,
-                0.11937189102172852,
-                0.1306462287902832,
-                0.11205482482910156,
-                0.15182113647460938,
-                0.16006708145141602,
-                0.19011592864990234,
-                0.21713829040527344,
-                0.17794132232666016,
-                0.18584394454956055,
-                0.03577899932861328,
-            ]
-        },
-        "integration_segmentation_3d": {  # for the mixed readers
-            "losses": [
-                0.5645154356956482,
-                0.4984356611967087,
-                0.472334086894989,
-                0.47419720590114595,
-                0.45881829261779783,
-                0.43097741305828097,
-            ],
-            "best_metric": 0.9325698614120483,
-            "infer_metric": 0.9326590299606323,
-        },
-    },
     {  # test answers for PyTorch 1.13
         "integration_workflows": {
             "output_sums_2": [