diff --git a/README.md b/README.md
index dc9a60dc..ce8b6684 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,35 @@ pip install -e .[dev]
 
 #### Test with `pytest`
 
-To run tests on itwinai package:
+Do this only if you are a developer and want to test your code with pytest.
+
+First, create virtual environments for both torch and tensorflow.
+For instance, you can use:
+
+```bash
+make torch-cpu
+make tf-2.13-cpu
+```
+
+To select the names of the torch and tf environments, set the following
+environment variables. They allow the tests to run in environments whose
+names differ from the defaults `.venv-pytorch` and `.venv-tf`.
+
+```bash
+export TORCH_ENV="my_torch_env"
+export TF_ENV="my_tf_env"
+```
+
+Functional tests (marked with `pytest.mark.functional`) are executed under
+the `/tmp/pytest` directory to guarantee that they run in a clean environment.
+
+To run the functional tests, use:
+
+```bash
+pytest -v tests/ -m "functional"
+```
+
+To run all tests on the itwinai package:
 
 ```bash
 # Activate env
diff --git a/tests/use-cases/conftest.py b/tests/use-cases/conftest.py
index eccdc208..69229db6 100644
--- a/tests/use-cases/conftest.py
+++ b/tests/use-cases/conftest.py
@@ -2,6 +2,8 @@
 from typing import Callable
 
 import pytest
 import subprocess
+import random
+import string
 
 FNAMES = [
@@ -10,6 +12,24 @@
 ]
 
 
+def rnd_string(len: int = 26):
+    return ''.join(random.sample(string.ascii_lowercase, len))
+
+
+@pytest.fixture
+def tmp_test_dir():
+    root = '/tmp/pytest'
+    os.makedirs(root, exist_ok=True)
+    test_dir = os.path.join(root, rnd_string())
+    while os.path.exists(test_dir):
+        test_dir = os.path.join(root, rnd_string())
+    os.makedirs(test_dir, exist_ok=True)
+
+    yield test_dir
+
+    # Optional: remove dir here...
+
+
 @pytest.fixture
 def torch_env() -> str:
     """
@@ -21,7 +41,7 @@ def torch_env() -> str:
         env_p = './.venv-pytorch'
     else:
         env_p = os.environ.get('TORCH_ENV')
-    return os.path.join(os.getcwd(), env_p)
+    return os.path.abspath(env_p)
 
 
 @pytest.fixture
@@ -35,7 +55,7 @@ def tf_env() -> str:
         env_p = './.venv-tf'
     else:
         env_p = os.environ.get('TF_ENV')
-    return os.path.join(os.getcwd(), env_p)
+    return os.path.abspath(env_p)
 
 
 @pytest.fixture
diff --git a/tests/use-cases/test_3dgan.py b/tests/use-cases/test_3dgan.py
index 3ec84d48..9d19d1f3 100644
--- a/tests/use-cases/test_3dgan.py
+++ b/tests/use-cases/test_3dgan.py
@@ -3,9 +3,10 @@
 """
 import pytest
 import subprocess
+import os
 
 CERN_PATH = "use-cases/3dgan"
-CKPT_PATH = "3dgan-inference.pth"
+CKPT_NAME = "3dgan-inference.pth"
 
 
 @pytest.mark.skip("deprecated")
@@ -15,23 +16,25 @@ def test_structure_3dgan(check_folder_structure):
 
 
 @pytest.mark.functional
-def test_3dgan_train(torch_env, install_requirements):
+def test_3dgan_train(torch_env, tmp_test_dir, install_requirements):
     """
     Test 3DGAN torch lightning trainer by running it end-to-end.
""" install_requirements(CERN_PATH, torch_env) + conf = os.path.join(os.path.abspath(CERN_PATH), 'pipeline.yaml') trainer_params = "pipeline.init_args.steps.training_step.init_args" cmd = (f"{torch_env}/bin/itwinai exec-pipeline " - f"--config pipeline.yaml " + f"--config {conf} " f'-o {trainer_params}.config.trainer.accelerator=cpu ' f'-o {trainer_params}.config.trainer.strategy=auto ' ) - subprocess.run(cmd.split(), check=True, cwd=CERN_PATH) + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) @pytest.mark.functional def test_3dgan_inference( torch_env, + tmp_test_dir, install_requirements, # fake_model_checkpoint ): @@ -41,10 +44,15 @@ def test_3dgan_inference( install_requirements(CERN_PATH, torch_env) # Create fake inference dataset and checkpoint - cmd = f"{torch_env}/bin/python create_inference_sample.py" - subprocess.run(cmd.split(), check=True, cwd=CERN_PATH) + exec = os.path.join(os.path.abspath(CERN_PATH), + 'create_inference_sample.py') + cmd = (f"{torch_env}/bin/python {exec} " + f"--root {tmp_test_dir} " + f"--ckpt-name {CKPT_NAME}") + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) # Test inference + conf = os.path.join(os.path.abspath(CERN_PATH), 'inference-pipeline.yaml') getter_params = "pipeline.init_args.steps.dataloading_step.init_args" trainer_params = "pipeline.init_args.steps.inference_step.init_args" logger_params = trainer_params + ".config.trainer.logger.init_args" @@ -52,10 +60,10 @@ def test_3dgan_inference( saver_params = "pipeline.init_args.steps.saver_step.init_args" cmd = ( f'{torch_env}/bin/itwinai exec-pipeline ' - '--config inference-pipeline.yaml ' + f'--config {conf} ' f'-o {getter_params}.data_path=exp_data ' - f'-o {trainer_params}.model.init_args.model_uri={CKPT_PATH} ' - f'-o {trainer_params}.config.trainer.accelerator=cpu ' + f'-o {trainer_params}.model.init_args.model_uri={CKPT_NAME} ' + f'-o {trainer_params}.config.trainer.accelerator=auto ' f'-o {trainer_params}.config.trainer.strategy=auto ' f'-o {logger_params}.save_dir=ml_logs/mlflow_logs ' f'-o {data_params}.datapath=exp_data/*/*.h5 ' diff --git a/tests/use-cases/test_cyclones.py b/tests/use-cases/test_cyclones.py index 6b262d45..25fa0bdf 100644 --- a/tests/use-cases/test_cyclones.py +++ b/tests/use-cases/test_cyclones.py @@ -7,6 +7,7 @@ import pytest import subprocess +import os CYCLONES_PATH = "use-cases/cyclones" @@ -19,12 +20,13 @@ def test_structure_cyclones(check_folder_structure): @pytest.mark.functional @pytest.mark.memory_heavy -def test_cyclones_train_tf(tf_env, install_requirements): +def test_cyclones_train_tf(tf_env, tmp_test_dir, install_requirements): """ Test Cyclones tensorflow trainer by running it end-to-end. 
""" # TODO: create a small sample dataset for tests only install_requirements(CYCLONES_PATH, tf_env) + pipe = os.path.join(os.path.abspath(CYCLONES_PATH), 'pipeline.yaml') cmd = (f"{tf_env}/bin/python train.py " - f"-p pipeline.yaml") - subprocess.run(cmd.split(), check=True, cwd=CYCLONES_PATH) + f"-p {pipe}") + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) diff --git a/tests/use-cases/test_mnist.py b/tests/use-cases/test_mnist.py index b39eb1a8..1f18a8e6 100644 --- a/tests/use-cases/test_mnist.py +++ b/tests/use-cases/test_mnist.py @@ -7,6 +7,7 @@ import pytest import subprocess +import os # from itwinai.cli import exec_pipeline TORCH_PATH = "use-cases/mnist/torch" @@ -33,7 +34,7 @@ def test_structure_mnist_tf(check_folder_structure): @pytest.mark.functional -def test_mnist_train_torch(torch_env, install_requirements): +def test_mnist_train_torch(torch_env, tmp_test_dir, install_requirements): """ Test MNIST torch native trainer by running it end-to-end. @@ -42,13 +43,14 @@ def test_mnist_train_torch(torch_env, install_requirements): >>> export TORCH_ENV="my_env" """ install_requirements(TORCH_PATH, torch_env) + conf = os.path.join(os.path.abspath(TORCH_PATH), 'config.yaml') cmd = (f"{torch_env}/bin/itwinai exec-pipeline " - f"--config config.yaml --pipe-key training_pipeline") - subprocess.run(cmd.split(), check=True, cwd=TORCH_PATH) + f"--config {conf} --pipe-key training_pipeline") + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) @pytest.mark.functional -def test_mnist_inference_torch(torch_env, install_requirements): +def test_mnist_inference_torch(torch_env, tmp_test_dir, install_requirements): """ Test MNIST torch native inference by running it end-to-end. @@ -59,17 +61,25 @@ def test_mnist_inference_torch(torch_env, install_requirements): install_requirements(TORCH_PATH, torch_env) # Create fake inference dataset and checkpoint - cmd = f"{torch_env}/bin/python create_inference_sample.py" - subprocess.run(cmd.split(), check=True, cwd=TORCH_PATH) + exec = os.path.join(os.path.abspath(TORCH_PATH), + 'create_inference_sample.py') + cmd = (f"{torch_env}/bin/python {exec} " + f"--root {tmp_test_dir}") + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) # Test inference + conf = os.path.join(os.path.abspath(TORCH_PATH), 'config.yaml') cmd = (f"{torch_env}/bin/itwinai exec-pipeline " - f"--config config.yaml --pipe-key inference_pipeline") - subprocess.run(cmd.split(), check=True, cwd=TORCH_PATH) + f"--config {conf} --pipe-key inference_pipeline") + subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir) @pytest.mark.functional -def test_mnist_train_torch_lightning(torch_env, install_requirements): +def test_mnist_train_torch_lightning( + torch_env, + tmp_test_dir, + install_requirements +): """ Test MNIST torch lightning trainer by running it end-to-end. 
@@ -77,18 +87,20 @@
     >>> export TORCH_ENV="my_env"
     """
-    install_requirements(TORCH_PATH, torch_env)
+    install_requirements(LIGHTNING_PATH, torch_env)
+    conf = os.path.join(os.path.abspath(LIGHTNING_PATH), 'config.yaml')
     cmd = (f"{torch_env}/bin/itwinai exec-pipeline "
-           f"--config config.yaml --pipe-key training_pipeline")
-    subprocess.run(cmd.split(), check=True, cwd=LIGHTNING_PATH)
+           f"--config {conf} --pipe-key training_pipeline")
+    subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir)
 
 
 @pytest.mark.functional
-def test_mnist_train_tf(tf_env, install_requirements):
+def test_mnist_train_tf(tf_env, tmp_test_dir, install_requirements):
     """
     Test MNIST tensorflow trainer by running it end-to-end.
     """
     install_requirements(TF_PATH, tf_env)
+    conf = os.path.join(os.path.abspath(TF_PATH), 'pipeline.yaml')
     cmd = (f"{tf_env}/bin/itwinai exec-pipeline "
-           f"--config pipeline.yaml --pipe-key pipeline")
-    subprocess.run(cmd.split(), check=True, cwd=TF_PATH)
+           f"--config {conf} --pipe-key pipeline")
+    subprocess.run(cmd.split(), check=True, cwd=tmp_test_dir)
diff --git a/tfrecords/.DS_Store b/tfrecords/.DS_Store
new file mode 100644
index 00000000..c1aed874
Binary files /dev/null and b/tfrecords/.DS_Store differ
diff --git a/use-cases/3dgan/create_inference_sample.py b/use-cases/3dgan/create_inference_sample.py
index 366bc672..4bcc0d99 100644
--- a/use-cases/3dgan/create_inference_sample.py
+++ b/use-cases/3dgan/create_inference_sample.py
@@ -1,10 +1,23 @@
 """Create a simple inference dataset sample and a checkpoint."""
+import argparse
+import os
 import torch
+from model import ThreeDGAN
 
-CKPT_PATH = "3dgan-inference.pth"
 
-if __name__ == "__main__":
-    from model import ThreeDGAN
+def create_checkpoint(
+    root: str = '.',
+    ckpt_name: str = "3dgan-inference.pth"
+):
+    ckpt_path = os.path.join(root, ckpt_name)
     net = ThreeDGAN()
-    torch.save(net, CKPT_PATH)
+    torch.save(net, ckpt_path)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--root", type=str, default='.')
+    parser.add_argument("--ckpt-name", type=str, default="3dgan-inference.pth")
+    args = parser.parse_args()
+    create_checkpoint(**vars(args))
diff --git a/use-cases/3dgan/dataloader.py b/use-cases/3dgan/dataloader.py
index 89234895..b4dcc096 100644
--- a/use-cases/3dgan/dataloader.py
+++ b/use-cases/3dgan/dataloader.py
@@ -35,7 +35,8 @@ def execute(self):
             gdown.download_folder(
                 url=self.data_url,
                 quiet=False,
-                output=self.data_path
+                output=self.data_path,
+                verify=False
             )
diff --git a/use-cases/cyclones/dataloader.py b/use-cases/cyclones/dataloader.py
index 7f224157..3cf1d97b 100644
--- a/use-cases/cyclones/dataloader.py
+++ b/use-cases/cyclones/dataloader.py
@@ -180,6 +180,7 @@ def setup_config(self, config: Dict) -> None:
         if not exists(join(root_dir, self.data_path)):
             gdown.download_folder(
                 url=self.data_url, quiet=False,
+                verify=False,
                 output=join(root_dir, self.data_path)
             )
diff --git a/use-cases/mnist/torch/create_inference_sample.py b/use-cases/mnist/torch/create_inference_sample.py
index 2b03f610..1c588c48 100644
--- a/use-cases/mnist/torch/create_inference_sample.py
+++ b/use-cases/mnist/torch/create_inference_sample.py
@@ -2,6 +2,7 @@
 
 import torch
 import os
+import argparse
 
 from model import Net
 from dataloader import InferenceMNIST
@@ -31,4 +32,11 @@ def mnist_torch_inference_files(
 
 
 if __name__ == "__main__":
-    mnist_torch_inference_files()
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--root", type=str, default='.')
+    parser.add_argument("--samples-path", type=str,
+                        default='mnist-sample-data')
+    parser.add_argument("--model-name", type=str,
+                        default='mnist-pre-trained.pth')
+    args = parser.parse_args()
+    mnist_torch_inference_files(**vars(args))
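For reference, a usage sketch of the two sample-creation scripts after this change. The scratch directory below is illustrative (the functional tests create a random folder under `/tmp/pytest` instead), the flag values simply repeat the argparse defaults defined above, and the commands are assumed to run from the repository root in an environment where the use case requirements are installed:

```bash
# Illustrative scratch directory; not the one the tests actually generate
SAMPLE_DIR=/tmp/pytest/example
mkdir -p "$SAMPLE_DIR"

# 3DGAN: save an (untrained) model checkpoint into the scratch directory
python use-cases/3dgan/create_inference_sample.py \
    --root "$SAMPLE_DIR" --ckpt-name 3dgan-inference.pth

# MNIST torch: create the fake inference dataset and pre-trained model
python use-cases/mnist/torch/create_inference_sample.py \
    --root "$SAMPLE_DIR" \
    --samples-path mnist-sample-data \
    --model-name mnist-pre-trained.pth
```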