From 28da5421342fed2bcc4a7f55ec53b409e58f646d Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Wed, 7 Dec 2022 16:16:55 +0530 Subject: [PATCH 01/47] Intial Commit Nodule Detection --- .../lung_nodule_detection/.gitignore | 3 + .../lung_nodule_detection/ReadMe.md | 114 +++++ .../configs/download_config.json | 18 + .../configs/network_config.json | 4 + .../lung_nodule_detection/init_venv.sh | 29 ++ .../lung_nodule_detection/requirements.txt | 9 + .../lung_nodule_detection/setup.py | 5 + .../src/data_prep/prep_pack/__init__.py | 0 .../src/data_prep/prep_pack/create_folds.py | 291 ++++++++++++ .../data_prep/prep_pack/generate_patches.py | 394 ++++++++++++++++ .../data_prep/prep_pack/generate_slices.py | 178 ++++++++ .../src/data_prep/prep_pack/visualize.py | 46 ++ .../src/data_prep/prepare_data.py | 102 +++++ .../src/inference/infer_pack/data_loader.py | 127 ++++++ .../inference/infer_pack/infer_lung_seg.py | 192 ++++++++ .../infer_pack/infer_patch_classifier.py | 92 ++++ .../src/inference/infer_pack/lenet.py | 36 ++ .../src/inference/infer_pack/r2unet.py | 425 ++++++++++++++++++ .../src/inference/infer_pack/sumnet_bn_vgg.py | 86 ++++ .../src/inference/infer_pack/utils.py | 28 ++ .../src/inference/inference.py | 42 ++ .../src/training/train_network.py | 71 +++ .../src/training/train_pack/__init__.py | 1 + .../src/training/train_pack/data_loader.py | 130 ++++++ .../src/training/train_pack/lenet.py | 36 ++ .../src/training/train_pack/lung_seg.py | 252 +++++++++++ .../src/training/train_pack/lung_seg_adv.py | 330 ++++++++++++++ .../training/train_pack/patch_classifier.py | 220 +++++++++ .../src/training/train_pack/r2unet.py | 425 ++++++++++++++++++ .../src/training/train_pack/sumnet_bn_vgg.py | 86 ++++ .../src/training/train_pack/utils.py | 28 ++ .../tests/test_export.py | 0 .../tests/test_inference.py | 0 .../lung_nodule_detection/tests/test_train.py | 0 34 files changed, 3800 insertions(+) create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/.gitignore create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/requirements.txt create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/setup.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py create mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/.gitignore b/misc/pytorch_toolkit/lung_nodule_detection/.gitignore new file mode 100644 index 00000000000..6a01be53f44 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/.gitignore @@ -0,0 +1,3 @@ +model_weights/* +test_data/* +temp_data/* \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md new file mode 100644 index 00000000000..833bfd14fe1 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md @@ -0,0 +1,114 @@ +# Lung Segmentation and Nodule Detection in Computed Tomography Scan using a Convolutional Neural Network Trained Adversarially using Turing Test Loss + +Lung cancer is the most common form of cancer found worldwide with a high mortality rate. Early detection of pulmonary nodules by screening with a low-dose computed tomography (CT) scan is crucial for its effective clinical management. Nodules which are symptomatic of malignancy occupy about 0.0125 - 0.025% of volume in a CT scan of a patient. Manual screening of all slices is a tedious task and presents a high risk of human errors. To tackle this problem we propose a computationally efficient two-stage framework. In the first stage, a convolutional neural network (CNN) trained adversarially using Turing test loss segments the lung region. +In the second stage, patches sampled from the segmented region are then classified to detect the presence of nodules. The proposed method is experimentally validated on the LUNA16 challenge dataset with a dice coefficient of **0.984 ± 0.0007** for 10-fold cross-validation. 
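For reference, the Dice coefficient quoted above measures the overlap between a predicted and a ground-truth lung mask. A minimal PyTorch sketch of the metric (the repository's own `dice_coefficient` lives in the `utils.py` modules and may differ in details):

```python
import torch

def dice_coefficient(pred: torch.Tensor, target: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    """Soft Dice between two binary masks of the same shape (illustrative only)."""
    pred = pred.float().reshape(-1)
    target = target.float().reshape(-1)
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)

# Perfect overlap gives a score of ~1.0
mask = torch.ones(512, 512)
print(dice_coefficient(mask, mask))
```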
+**Paper**: [arXiv](https://arxiv.org/abs/2006.09308v1)
+BibTeX reference to cite, if you use it:
+
+```bibtex
+@inproceedings{Sathish2020LungSA,
+  title={Lung Segmentation and Nodule Detection in
+    Computed Tomography Scan using a Convolutional Neural Network
+    Trained Adversarially using Turing Test Loss},
+  author={Rakshith Sathish and Rachana Sathish
+    and Ramanathan Sethuraman and Debdoot Sheet},
+  year={2020}
+}
+```
+
+## Dataset used
+
+The proposed method is experimentally validated by performing 10-fold cross-validation on the LUNA16 challenge dataset.
+> Dataset download page: [https://luna16.grand-challenge.org/](https://luna16.grand-challenge.org/)
+
+The dataset consists of CT volumes from 880 subjects, provided as ten subsets for 10-fold cross-validation. In each fold of the experiment, eight subsets were used for training and one each for validation and testing. The annotations provided include binary masks for lung segmentation, and the coordinates and spherical diameters of the nodules present in each slice. The LIDC-IDRI dataset, from which LUNA16 is derived, has nodule annotations in the form of contours, which preserve their actual shape. Therefore, the LUNA16 annotations are used only in Stage 1 (lung segmentation); the nodule annotations from the LIDC dataset are used in Stage 2 (nodule detection) to determine the presence of nodules in image patches.
+> Dataset download page: [https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI](https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI)
+
+The ground-truth annotations were marked in a two-phase image annotation process performed by four experienced thoracic radiologists. Systematic sampling of slices from the CT volumes was performed to ensure an equal distribution of slices with and without nodules.
+
+>**Note**: The systematically sampled slice numbers/images to be used are given in the repository inside the data preparation folder.
+
+>**License**: Both datasets are published by their creators under the [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/).
+
+# Using the code
+
+## Code Organization
+The code directory is organised into three subfolders: data preparation, training and evaluation. Each subfolder has a `.py` entry script and a package folder containing the function definitions.
+
+## Requirements
+
+Create a conda virtual environment with
+```
+conda create --name <env_name> --file requirements.txt
+```
+This creates a virtual environment with all the necessary packages of the same versions used during development.
+
+## Data preparation
+Follow the steps below to prepare and organise the data for training (a minimal Python sketch of the same pipeline is given after this list).
+> Details about the arguments and their purpose are explained within the code. To see the details, run `python prepare_data.py -h`.
+
+> Make sure the dataset has been properly downloaded and extracted before proceeding.
+
+1. `python prepare_data.py --genslice --masktype <nodule/lung> --datasetpath <dataset path> --savepath <save path>`
+   This step extracts individual CT slices from the CT volumes provided in the dataset. Each slice is saved separately as a npy file named `[series_uid]_slice[sliceno].npy`.
+   Perform this step for masktype `nodule` and `lung` separately before proceeding to the next step.
+
+2. `python prepare_data.py --createfolds --datapath <data path> --savepath <save path> --datasetpath <dataset path>`
+   This step first classifies the slices into two categories, positive and negative, based on the presence of nodules in them. On completion, the dataset, which consists of CT volumes from 880 subjects provided as ten subsets, is divided into ten folds for cross-validation. In each fold of the experiment, eight subsets are set aside for training and one each for validation and testing. A balanced set consisting of an (approximately) equal number of positive and negative slices is identified for each fold, and the filenames of the slices in each fold are stored in separate JSON files.
+
+3. `python prepare_data.py --genpatch --jsonpath <json path> --foldno <fold number> --category <train_set/valid_set/test_set> --datapath <data path> --lungsegpath <lung mask path> --savepath <save path> --patchtype <positive/negative>`
+   This step generates the patches which are used to train the classifier network.
+
+4. `python prepare_data.py --visualize --seriesuid <series uid> --sliceno <slice number> --datapath <data path> --savepath <save path>`
+   Use this to visualize a particular slice.
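The `prepare_data.py` entry point simply dispatches to the functions in `src/data_prep/prep_pack`, so the same pipeline can also be driven from Python. A minimal sketch, run from `src/data_prep/`; every path and the fold number below are placeholders, not files shipped with the repository:

```python
# Illustrative only: mirrors what prepare_data.py does for each CLI flag.
from prep_pack import generate_slices, create_folds, generate_patches

dataset_path = '/data/LUNA16/'             # hypothetical location of the extracted LUNA16 subsets
lungmask_path = '/data/LUNA16_lungmasks'   # hypothetical location of the provided lung masks
save_path = '/data/luna_prepared/'         # hypothetical output location

# --genslice: per-slice images and nodule masks, then the lung masks
generate_slices.extract_slices(dataset_path, save_path, masktype='nodule')
generate_slices.generate_lungseg(lungmask_path, save_path)

# --createfolds: label slices as positive/negative and build balanced folds
create_folds.positive_negative_classifier(save_path, save_path)
subsets = create_folds.subset_classifier(dataset_path, save_path)
create_folds.assign_folds(subsets, save_path)
create_folds.create_balanced_dataset(save_path, save_path)

# --genpatch: 64x64 training patches for the classifier, fold 0 shown here
generate_patches.generate_patchlist(save_path, 'positive', fold_no=0)
generate_patches.generate_positive_patch(save_path, 0, save_path, save_path, category='train_set')
```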
+
+## Training
+
+> Details about the arguments and their purpose are explained within the code. To see the details, run `python train_network.py -h`.
+
+To train the lung segmentation network without the discriminator, execute:
+`python train_network.py --lungseg --foldno <fold number> --savepath <save path> --jsonpath <json path> --datapath <data path> --lungsegpath <lung mask path> --network <network> --epochs <epochs>`
+
+To train the lung segmentation network with the discriminator and Turing test loss, execute:
+`python train_network.py --lungsegadv --foldno <fold number> --savepath <save path> --jsonpath <json path> --datapath <data path> --lungsegpath <lung mask path> --network <network> --epochs <epochs>`
+
+To train the patch classifier network, execute:
+`python patch_classifier --savepath <save path> --imgpath <image path> --epochs <epochs>`
+
+## Evaluation
+
+> Details about the arguments and their purpose are explained within the code. To see the details, run `python inference.py -h`.
+
+To evaluate the segmentation models, execute:
+`python inference.py --lunseg --foldno <fold number> --savepath <save path> --jsonpath <json path> --network <network>`
+
+To evaluate the classifier network, execute:
+`python inference.py --patchclass --savepath <save path> --imgpath <image path>`
+
+## Pre-trained Models
+## Results
+## Support
+
+If you face any issues while executing the code, raise an issue or write to rakshith.sathish@gmail.com.
+
+# Authors and Acknowledgment
+- Rakshith Sathish
+- Rachana Sathish
+- Ramanathan Sethuraman
+- Debdoot Sheet
+> This work was supported through a research grant from Intel India Grand Challenge 2016 for Project MIRIAD.
+
+# License
+Copyright [2020] [IITKLIV]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
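To apply a trained segmentation model outside the evaluation script above, a single prepared slice can be segmented directly from Python. A minimal sketch, run from `src/inference/`; the weight-file name follows the convention in `infer_pack/infer_lung_seg.py` (`<network>_best_lungs.pt`), and all paths are placeholders:

```python
# Illustrative only: segments one prepared .npy slice with a trained SUMNet.
import numpy as np
import torch
import torch.nn.functional as F
from infer_pack.sumnet_bn_vgg import SUMNet

net = SUMNet(in_ch=1, out_ch=2)
net.load_state_dict(torch.load('model_weights/sumnet_best_lungs.pt', map_location='cpu'))  # hypothetical path
net.eval()

slice_img = np.load('/data/luna_prepared/img/<series_uid>_slice100.npy')  # placeholder slice
x = torch.from_numpy(slice_img).float().unsqueeze(0).unsqueeze(0)  # shape: 1 x 1 x 512 x 512

with torch.no_grad():
    probs = F.softmax(net(x), dim=1)                  # channel 1 is the lung class
    lung_mask = (torch.argmax(probs, dim=1) == 1).float()
print(lung_mask.shape)  # torch.Size([1, 512, 512])
```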
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json new file mode 100644 index 00000000000..0c485aef63c --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json @@ -0,0 +1,18 @@ +{ + "stage1": { + "dest_path_data": "", + "url_data": "", + "url_model": "", + "dest_path_model": "" + }, + "stage2": { + "dest_path_data": "", + "url_data": "", + "url_model": "", + "dest_path_model": "" + }, + "test_data": { + "dest_path": "", + "drive_url": " " + } +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json new file mode 100644 index 00000000000..cafb1ab4589 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json @@ -0,0 +1,4 @@ +{"train":{}, +"inference":{}, +"export": {} +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh b/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh new file mode 100644 index 00000000000..2697b9bc2da --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +work_dir=$(realpath "$(dirname $0)") + +venv_dir=$1 +if [ -z "$venv_dir" ]; then + venv_dir=venv +fi + +cd ${work_dir} + +if [ -e venv ]; then + echo + echo "Virtualenv already exists. Use command to start working:" + echo "$ . venv/bin/activate" +fi + +virtualenv ${venv_dir} -p python3 --prompt="nodule" + +. ${venv_dir}/bin/activate + + +cat requirements.txt | xargs -n 1 -L 1 pip3 install + +pip install -e . + +echo +echo "Activate a virtual environment to start working:" +echo "$ . ${venv_dir}/bin/activate" diff --git a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt new file mode 100644 index 00000000000..b8611b0f8e2 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt @@ -0,0 +1,9 @@ +torch +torchvision +torchmetrics +numpy +openvino-dev[onnx]==2021.4.2 +onnxruntime==1.8.1 +wget +tqdm +pytest \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/setup.py b/misc/pytorch_toolkit/lung_nodule_detection/setup.py new file mode 100644 index 00000000000..a4b9e9ed8e9 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/setup.py @@ -0,0 +1,5 @@ +from setuptools import setup, find_packages + +setup(name='nodule', + version='1.0', + packages=find_packages()) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py new file mode 100644 index 00000000000..b3a3d31345d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python +# coding: utf-8 + +import numpy as np +import os +from natsort import natsorted +from tqdm import tqdm as tq +from collections import defaultdict +import json +import argparse + +def positive_negative_classifier(data_path,save_path): + """Classifies slices as positive and negative slices. 
+ + If any non-zero value is present in the mask/GT of + the specified slice then it is classified as positive else negative. + + Parameters + ---------- + data_path: str + The folder location where masks/GT for slices are stored. + save_path: str + The folder location where output json files should be stored. + + Returns + ------- + None + + """ + + mask_path = data_path+'/mask/' + + file_list=natsorted(os.listdir(mask_path)) + positive_list,negative_list=[],[] + + for file in tq(file_list): + + try: + mask = np.load(mask_path+file) + if (np.any(mask)): + positive_list.append(file) + else: + negative_list.append(file) + # break + except: + print('Skipped %s ,Unable to locate corresponding mask') + continue + + + with open(save_path+'positive_slices.json', 'w') as f: + json.dump(positive_list, f) + with open(save_path+'negative_slices.json', 'w') as g: + json.dump(negative_list, g) + + + +def subset_classifier(dataset_path,save_path): + """ Classifies the slices according the subset of origin + + Parameters + ---------- + dataset_path: str + Folder location where dataset is present + + Returns + ------- + dict + A dictionary consisting of filename according to their subset. + """ + + dict_subset={} + dict_subset = defaultdict(lambda:[],dict_subset) + for i in range(10): + #Since 10 subsets are provided in the dataset. + for file in tq(os.listdir(dataset_path+'subset'+str(i))): + file_name=os.path.basename(file) + if file_name.endswith(".mhd"): + dict_subset['subset'+str(i)].append(file_name) + + with open(save_path+'subset_classification.json', 'w') as h: + json.dump(dict_subset, h) + + return dict_subset + + + +def assign_folds(dict_subset,save_path): + """ Divides subsets into train,validation and testing sets of corresponding folds + + Parameters + ---------- + dict_subset: dict + Dictionary which has the files and its corresponding subset + savepath: str + Folder location to save the result + + Returns + ------- + None + """ + + dataset_list=[dict_subset['subset0'],dict_subset['subset1'], + dict_subset['subset2'],dict_subset['subset3'], + dict_subset['subset4'],dict_subset['subset5'], + dict_subset['subset6'],dict_subset['subset7'], + dict_subset['subset8'],dict_subset['subset9']] + + for i in tq(range(10)): #10 Subsets in the dataset + + fold={} + fold = defaultdict(lambda:0,fold) + fold['train_set']=dataset_list[0-i]+dataset_list[1-i]+dataset_list[2-i]+dataset_list[3-i]+dataset_list[4-i]+dataset_list[5-i]+dataset_list[6-i]+dataset_list[7-i] + + fold['valid_set']=dataset_list[8-i] + fold['test_set']=dataset_list[9-i] + + + fold_name='fold'+str(i)+'_mhd.json' + with open(save_path+fold_name, 'w') as j: + json.dump(fold, j) + + +def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test): + + """Adds additional negative slices to the prepared datalist + + Parameters + ---------- + series_uid_npylist: nparray + fold_npy: dict + series_uid_train: list + sereies_uid_val: list + sereies_uid_test: list + + Returns + ------- + dict + + """ + + for i in (series_uid_train): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['train_set'].append(file) + for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['train_set'].append(file) + + + for i in (series_uid_val): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['valid_set'].append(file) 
+ for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['valid_set'].append(file) + + + for i in (series_uid_test): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['test_set'].append(file) + for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['test_set'].append(file) + + return fold_npy + +def create_balanced_dataset(save_path,data_path,additional=False): + + """Creates balanced dataset with equal positive and negative slices + + Parameters + ---------- + data_path: str + The folder location where image/.npy for slices are stored. + save_path: str + The folder location where output json files should be stored. + additional: Boolean,Optional + If True add additonal negative slices + + Returns + ------- + dict + Returns dict with equal positive and negative slices. + """ + + img_path = data_path+'/img/' + + with open(save_path+'positive_slices.json') as c: + pos_slices_json=json.load(c) + + + pos_list=[x.split('.mhd')[0] for x in pos_slices_json] + pos_list_uq=np.unique(np.array(pos_list)) + + + print('Sorting entire image set. Will take time.') + sorted_list=natsorted(os.listdir(img_path)) + + print('Sorting completed') + + + for i in tq(range(10)): + with open(save_path+'fold'+str(i)+'_mhd.json') as f: + j_data=json.load(f) + + pos_count=0 + neg_count=0 + count = {} + fold_npy={} + fold_npy = defaultdict(lambda:[],fold_npy) + series_uid_train=[x.split('.mhd')[0] for x in j_data['train_set']] + series_uid_val=[x.split('.mhd')[0] for x in j_data['valid_set']] + series_uid_test=[x.split('.mhd')[0] for x in j_data['test_set']] + fold_npy_name='fold'+str(i)+'_pos_neg_eq.json' + # series_uid_npylist=[x.split('_')[0] for x in npy_list] + # series_uid_npylist_uq=np.unique(np.array(series_uid_npylist)) + + for f,name in enumerate(sorted_list): + + for q in series_uid_train: + if q in name : + if name in pos_slices_json: + #pos_slices_json contains the list of all positive slices. 
+ pos_count += 1 + fold_npy['train_set'].append(name) + elif pos_count>neg_count: + # Here the slice will be negative since 'name' not in pos_slices + neg_count += 1 + fold_npy['train_set'].append(name) + else: + continue + else: + continue + + + for q in series_uid_val: + + if q in name : + if name in pos_slices_json: + + pos_count += 1 + fold_npy['valid_set'].append(name) + elif pos_count>neg_count: + + neg_count += 1 + fold_npy['valid_set'].append(name) + else: + continue + else: + continue + + for q in series_uid_test: + if q in name : + if name in pos_slices_json: + pos_count += 1 + fold_npy['test_set'].append(name) + elif pos_count>neg_count: + neg_count += 1 + fold_npy['test_set'].append(name) + else: + continue + else: + continue + with open(save_path+fold_npy_name, 'w') as z: + json.dump(fold_npy,z) + + if additional == True: + fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) + + + + print('Balanced dataset generated and saved') + + return fold_npy diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py new file mode 100644 index 00000000000..57d99156da6 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python +# coding: utf-8 + +import pickle +import numpy as np +import os +import cv2 +from skimage.util.shape import view_as_windows +import json +from tqdm import tqdm as tq +import matplotlib.pyplot as plt +import random +from collections import defaultdict + +def generate_patchlist(save_path,patchtype,fold_no=0): + """Generates positive slices in each fold + + Parameters + ---------- + save_path: str + Folder location where jsons are to be stored + fold_no: int + Integer between 0-9 that specifies the fold + category: str + Positive/negative + + Returns + ------- + None + + """ + + with open(save_path+'fold'+str(fold_no)+'_pos_neg_eq.json') as file: + j_data = json.load(file) + with open(save_path+'/'+patchtype+'_slices.json') as c: + pos_slices_json=json.load(c) + + # print(pos_slices_json) + + train_set = j_data['train_set'] + valid_set = j_data['valid_set'] + test_set = j_data['test_set'] + train_seg_list = [] + val_seg_list = [] + test_seg_list = [] + + for i in tq(train_set): + if i in pos_slices_json: + train_seg_list.append(i) + + for i in tq(valid_set): + if i in pos_slices_json: + val_seg_list.append(i) + + for i in tq(test_set): + if i in pos_slices_json: + test_seg_list.append(i) + + patch_npy={} + patch_npy = defaultdict(lambda:[],patch_npy) + patch_npy['train_set'] = train_seg_list + patch_npy['valid_set'] = val_seg_list + patch_npy['test_set'] = test_seg_list + + with open(save_path+'/'+patchtype+'_patchlist_f'+str(fold_no)+'.json', 'w') as z: + json.dump(patch_npy,z) + + + +def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,category='train_set'): + """Gereates patches which doesn't have nodules + + Parameters + ---------- + jsonpath: str + Folder location where json files are stored + fold: int + Fold number + category: str + train_set/val_set/test_set + data_path: str + Folder location where img numpy arrays are stored + lung_segpath: str + Folder location where lung segmentation mask is stored + savepath: strr + Folder location to save the generated patches + + Returns + ------- + None + """ + + imgpath = data_path + '/img' + + with 
open(jsonpath+'negative_patchlist_f'+str(fold)+'.json') as file: + j_data = json.load(file) + + img_dir = imgpath + mask_dir = lung_segpath + nm_list = j_data[category] + + size = 64 + index = 0 + for img_name in tq(nm_list): + #Loading the masks as uint8 as threshold function accepts 8bit image as parameter. + img = np.load(os.path.join(img_dir, img_name)).astype(np.float32)#*255 + mask = np.load(os.path.join(mask_dir, img_name)).astype(np.uint8)#*255 + + if np.any(mask): + #Convert grayscale image to binary + _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value + contours, hierarchy = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours = sorted(contours, key=lambda x: cv2.contourArea(x)) + + #In certain cases there could be more than 2 contour, hence taking the largest 2 which will be lung + contours = contours[1:] + + + for cntr in contours: + patch_count = 2 + for i in range(patch_count): + xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width + xc,yc = xr+wr/2,yr+hr/2 + + try: + + x, y = random.randrange(xr, xr+wr-size/2),random.randrange(yr, yr+hr-size/2) + + except: + prob = random.randrange(0, 1) + if prob>0.5: + x, y = random.randrange(xr, xr+wr/2),random.randrange(yr, yr+hr/2) + else: + x, y = random.randrange(int(xr+wr/2),xr+wr),random.randrange(int(yr+hr/2),yr+hr) + + if x+size<512 & y+size<512: + patch_img = img[y: y+size, x: x+size].copy().astype(np.float16) + patch_mask = np.zeros((size,size)).astype(np.float16) + + else: + if x-size<=0 & y-size<=0: + patch_img = img[0: size, 0: size].copy().astype(np.float16) + patch_mask = np.zeros((size,size)).astype(np.float16) + + elif x-size<=0 & y-size>0: + patch_img = img[y-size: y, 0: size].copy().astype(np.float16) + patch_mask = np.zeros((size,size)).astype(np.float16) + + elif x-size>0 & y-size<=0: + patch_img = img[0: size, x-size: x].copy().astype(np.float16) + patch_mask = np.zeros((size,size)).astype(np.float16) + + else: + + patch_img = img[y-size: y, x-size: x].copy().astype(np.float16) + patch_mask = np.zeros((size,size)).astype(np.float16) + + + + if np.shape(patch_img) != (64,64): + print('shape',np.shape(patch_img)) + print('cordinate of patch',x,x+size,y,y+size) + print('cordinate of BBox',xr,yr,wr,hr) + + index += 1 + img_savepath = savepath+'/patches/'+'/img/' + mask_savepath = savepath+'/patches/'+'/mask/' + if not os.path.isdir(img_savepath): + os.makedirs(savepath+'/patches/'+'/img/') + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + else: + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + + if not os.path.isdir(mask_savepath): + os.makedirs(savepath+'/patches/'+'/mask/') + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + else: + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + + +def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set'): + """Generate patches with nodules + + Parameters + ---------- + jsonpath: str + Folder location where json files are stored + fold: int + Fold number + category: str + train_set/val_set/test_set + data_path: str + Folder location which has folder img and mask + + savepath: strr + Folder location to save the generated patches + + Returns + ------- + None + + """ + imgpath = data_path + '/img/' + maskpath = data_path + '/mask/' + + with open(jsonpath+'/positive_patchlist_f'+str(fold)+'.json') as file: + j_data = json.load(file) + + 
img_dir = imgpath + mask_dir = maskpath + nm_list = j_data[category] + + size = 64 + index = 0 + for img_name in tq(nm_list): + #Loading the masks as uint8 as threshold function accepts 8bit image as parameter. + img = np.load(os.path.join(img_dir, img_name)).astype(np.float16) + mask = np.load(os.path.join(mask_dir, img_name))/255 + mask = mask.astype(np.uint8) + + if np.any(mask): + #Convert grayscale image to binary + _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value + contours, hierarchy = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours = sorted(contours, key=lambda x: cv2.contourArea(x)) + + for cntr in contours: + patch_count = 4 + + xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width + xc,yc = int(xr+wr/2),int(yr+hr/2) + + if int(yc-size/2) <0 or int(xc-size/2)<0: + if int(yc-size/2) <0 and int(xc-size/2)<0: + patch_img1 = img[0:size , 0:size].copy().astype(np.float16) + patch_mask1 = mask[0:size , 0:size].copy().astype(np.float16) + + elif int(yc-size/2) >0 and int(xc-size/2)<0: + patch_img1 = img[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) + patch_mask1 = mask[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) + + elif int(yc-size/2) <0 and int(xc-size/2)>0: + patch_img1 = img[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + + + elif int(yc+size/2)>512 or int(xc+size/2)>512: + if int(yc+size/2)>512 and int(xc+size/2)>512: + m = yc+size - 512 + n = xc + size - 512 + patch_img1 = img[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) + patch_mask1 = mask[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) + + elif int(yc+size/2)>512 and int(xc+size/2)<512: + m = yc+size - 512 + patch_img1 = img[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + + elif int(yc+size/2)<512 and int(xc+size/2)>512: + n = xc+size - 512 + patch_img1 = img[int(yc-size/2):int(yc+size/2),int(xc-n):512].copy().astype(np.float16) + patch_mask1 = mask[int(yc-size/2):int(yc+size/2),int(xc-n):512].copy().astype(np.float16) + + elif (int(yc-size/2)>=0 and int(yc+size/2)<=512) : + if(int(xc-size/2)>=0 and int(xc+size/2)<=512): + patch_img1 = img[int(yc-size/2):int(yc+size/2) , int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[int(yc-size/2):int(yc+size/2) , int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + + if np.shape(patch_img1) != (64,64): + print('shape',np.shape(patch_img1)) + print('cordinate of patch',x,x+size,y,y+size) + print('cordinate of BBox',xr,yr,wr,hr) + + img_savepath = savepath+'/patches/'+category+'/img/' + mask_savepath = savepath+'/patches/'+category+'/mask/' + if not os.path.isdir(img_savepath): + os.makedirs(savepath+'/patches/'+category+'/img/') + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img1) + else: + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img1) + + if not os.path.isdir(mask_savepath): + os.makedirs(savepath+'/patches/'+category+'/mask/') + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask1) + else: + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask1) + + index += 1 + for i in range(patch_count): + xc,yc = xr,yr + xc,yc = xr+wr,yr+hr + + if i == 0: + + if xc+size<512 and 
yc+size<512: + patch_img = img[yc:yc+size,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc:yc+size,xc:xc+size].copy().astype(np.float16) + + elif xc+size>512 and yc+size<512: + m = xc+size-512 + patch_img = img[yc:yc+size,xc-m:xc+size-m].copy().astype(np.float16) + patch_mask = mask[yc:yc+size,xc-m:xc+size-m].copy().astype(np.float16) + + elif xc+size<512 and yc+size>512: + n = yc+size-512 + patch_img = img[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) + else: + m = xc+size-512 + n = yc+size-512 + patch_img = img[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) + elif i ==1: + + if xc-size>0 and yc+size<512: + patch_img = img[yc:yc+size,xc-size:xc].copy().astype(np.float16) + patch_mask = mask[yc:yc+size,xc-size:xc].copy().astype(np.float16) + + elif xc-size<0 and yc+size<512: + + patch_img = img[yc:yc+size,0:size].copy().astype(np.float16) + patch_mask = mask[yc:yc+size,0:size].copy().astype(np.float16) + + elif xc-size>0 and yc+size>512: + n = yc+size-512 + + patch_img = img[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) + + else: + n = yc+size-512 + + patch_img = img[yc-n:yc+size-n,0:size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,0:size].copy().astype(np.float16) + elif i ==2: + + if xc+size<512 and yc-size>0: + patch_img = img[yc-size:yc,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc:xc+size].copy().astype(np.float16) + + elif xc+size>512 and yc-size>0: + m = xc+size-512 + patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + + elif xc+size<512 and yc-size<0: + patch_img = img[0:size,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[0:size,xc:xc+size].copy().astype(np.float16) + + else: + m = xc+size-512 + patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + + elif i==3: + + if xc-size>0 and yc-size>0: + patch_img = img[yc-size:yc,xc-size:xc].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc-size:xc].copy().astype(np.float16) + + elif xc-size<0 and yc-size>0: + m = xc+size-512 + patch_img = img[yc-size:yc,0:size].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,0:size].copy().astype(np.float16) + + elif xc-size>0 and yc-size<0: + patch_img = img[0:size,xc-size:xc].copy().astype(np.float16) + patch_mask = mask[0:size,xc-size:xc].copy().astype(np.float16) + + else: + patch_img = img[0:size,0:size].copy().astype(np.float16) + patch_mask = mask[0:size,0:size].copy().astype(np.float16) + + + if np.shape(patch_img) != (64,64): + print('shape',np.shape(patch_img)) + + img_savepath = savepath+'/patches/'+category+'/img/' + mask_savepath = savepath+'/patches/'+category+'/mask/' + if not os.path.isdir(img_savepath): + os.makedirs(savepath+'/patches/'+category+'/img/') + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + else: + np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + + if not os.path.isdir(mask_savepath): + os.makedirs(savepath+'/patches/'+category+'/mask/') + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + else: + np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + + index += 1 + + 
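The long chain of boundary checks in `generate_positive_patch` above amounts to clamping a fixed 64×64 window to the 512×512 slice so that sampled patches never fall outside the image. A compact, illustrative equivalent (not a drop-in replacement for the sampling strategy above):

```python
import numpy as np

def crop_patch(img: np.ndarray, xc: int, yc: int, size: int = 64) -> np.ndarray:
    """Crop a size x size window centred on (xc, yc), clamped to the image bounds."""
    h, w = img.shape  # 512 x 512 for LUNA16 slices
    x0 = int(np.clip(xc - size // 2, 0, w - size))
    y0 = int(np.clip(yc - size // 2, 0, h - size))
    return img[y0:y0 + size, x0:x0 + size]

# A centre near the border still yields a full 64x64 patch.
patch = crop_patch(np.zeros((512, 512), dtype=np.float16), xc=500, yc=10)
assert patch.shape == (64, 64)
```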
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py new file mode 100644 index 00000000000..8dee91c1716 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python +# coding: utf-8 + + +import SimpleITK as sitk +import pylidc as pl +import matplotlib.pyplot as plt +import numpy as np +from collections import defaultdict +import os +import glob +import cv2 +from tqdm import tqdm as tq + + +def make_mask(height,width,slice_list,*args, **kwargs): + """Creates masks from the annotations given. + + Parameters + ---------- + height: int + Height of the mask to be created + widht: int + Width of mask image to be created + slice_list: list + + ii,jj: dict + Dictionary containing annotations + + Returns + ------- + nparray + + + """ + mask=np.zeros((height,width)) + n=kwargs.get('n', None) + point_dictx=kwargs.get('ii', None) + point_dicty=kwargs.get('jj', None) + + + if n in slice_list: + temp_listx=point_dictx[n] + temp_listy=point_dicty[n] + plot_listx= [sum(x)/len(point_dictx[n]) for x in zip(*temp_listx)] + plot_listy= [sum(y)/len(point_dicty[n]) for y in zip(*temp_listy)] + merged_list =np.array([[plot_listy[i],plot_listx[i]] for i in range(0, len(plot_listx))]) + + cv2.fillPoly(mask,pts=np.int32([merged_list]),color=(255,255,255)) + + return mask + +def extract_slices(dataset_path,save_path,masktype='nodule'): + """Extracts induvidual slices from the CT volumes given + in the dataset, clips the max-min values and stores them + as numpy arrays. + + Parameters + ---------- + dataset_path: str + Folder location of the dataset + save_path: str + Folder location to save the induvidual image & masks. 
+ masktype: str + Nodule mask or Lung mask + + Returns + ------- + + None + """ + + + file_list=[] + + for tr in tq(range(10)): + subset_path=dataset_path+"/subset"+str(tr)+"/" + for file in os.listdir(subset_path): + if file.endswith(".mhd"): + file_list.append(os.path.join(subset_path, file)) + + + for file in tq(file_list): + file_name=os.path.basename(file) + series_instance_uid=os.path.splitext(file_name)[0] + img_file=file + + itk_img = sitk.ReadImage(img_file) + img_array = sitk.GetArrayFromImage(itk_img) + num_slice, height, width = img_array.shape + #Has the image data + + scan = pl.query(pl.Scan).filter(pl.Scan.series_instance_uid== series_instance_uid).first() + + #Maped the image data with annotation using series id + + nods = scan.cluster_annotations() #Function used to determine which annotation belongs to which nodule + + + + nodule_dict={} #Dict to store number of contour markings for that nodule + slice_list=[] # List to store the slices which has nodules marked + points_dictx={} # These dicts are to store the points to be plotted (key=slice_index, ) + points_dicty={} + points_dictx = defaultdict(lambda:[],points_dictx) + points_dicty = defaultdict(lambda:[],points_dicty) + for i,nod in enumerate(nods): + nodule_dict[i]=len(nods[i]) #Stores a dict which has count of annotation for each nodule + + for key,value in nodule_dict.items(): + #if value>=3 : #Taking annotations provided by 3 or more annotator + for i in range(value): + ann=nods[key][i] #-1 to keep index correct + con=ann.contours[0] #All coutours for specific nodule collected + + k = con.image_k_position # Returns the slice number/index which has the nodule + slice_list.append(k) + ii,jj = ann.contours[0].to_matrix(include_k=False).T + points_dictx[k].append(ii) + points_dicty[k].append(jj) + + + ''' + !!Note!! 
The pylidc package gives cordinates for single slices, If more than one annotaions are give then + Sum(x)/total no: of annotation for all provided pixel is given as input + + ''' + + + for n in range(1,num_slice): + + image=(img_array[n].copy()).astype(np.float32) + im_max = np.max(image) + im_min = np.min(image) + if im_max != 0: + image[image>1000]=1000 + image[image<-1000]=-1000 + mask=make_mask(height,width,slice_list,ii=points_dictx,jj=points_dicty,n=n) + mask = np.array(mask, dtype=np.float32) + image = image - image.min() + image = image/image.max() + + if not os.path.isdir(save_path): + os.makedirs(save_path) + + if not os.path.isdir(save_path+'/img'): + os.makedirs(save_path+'/img') + np.save(save_path+'/img/'+series_instance_uid+'_slice'+str(n)+'.npy',image) + else: + np.save(save_path+'/img/'+series_instance_uid+'_slice'+str(n)+'.npy',image) + + if not os.path.isdir(save_path+'/mask'): + os.makedirs(save_path+'/mask') + np.save(save_path+'/mask/'+series_instance_uid+'_slice'+str(n)+'.npy',mask) + else: + np.save(save_path+'/mask/'+series_instance_uid+'_slice'+str(n)+'.npy',mask) + +def generate_lungseg(dataset_path,save_path): + volume_list = os.listdir(dataset_path) + file_list = [] + + for file in os.listdir(dataset_path): + if file.endswith(".mhd"): + file_list.append(os.path.join(dataset_path, file)) + + for img_file in tq(file_list): + file_name=os.path.basename(img_file) + series_instance_uid=os.path.splitext(file_name)[0] + itk_img = sitk.ReadImage(img_file) + img_array = sitk.GetArrayFromImage(itk_img) + num_slice, height, width = img_array.shape + for n in range(1,num_slice): + if not os.path.isdir(save_path+'/lungseg'): + os.makedirs(save_path+'/lungseg') + np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) + else: + np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py new file mode 100644 index 00000000000..e25d99fd20e --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# coding: utf-8 + +import json +import numpy as np +import os +import SimpleITK as sitk +import matplotlib.pyplot as plt + +def visualize_data(series_uid,slice_num,datapath,savepath): + """ To visualize the image and nodule masks of the dataset + + Parameters + ---------- + + series_uid: str + Series_instance_uid or filename of the image to visualize + slice_num: int + Slice number to visulaize + datapath: str + Folder location where image and mask numpy is stored. 
+ savepath: str + Folder location to save images + + + """ + + img_name = series_uid+'_slice'+str(slice_num)+'.npy' + mask = np.load(datapath+'mask/'+img_name) + img = np.load(datapath+'img/'+img_name) + lungseg = np.load(datapath+'lungseg/'+img_name) + + plt.figure() + plt.subplot(131) + plt.imshow(img,cmap='gray') + plt.title('Original Image') + plt.subplot(132) + plt.imshow(mask,cmap='gray') + plt.title('Ground truth (Lung)') + plt.subplot(133) + plt.imshow(mask,cmap='gray') + plt.title('Ground truth (Nodule)') + plt.savefig(savepath+'visualization.png') + plt.show() + plt.close() + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py new file mode 100644 index 00000000000..671044c0db4 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py @@ -0,0 +1,102 @@ +import argparse +from prep_pack import visualize +from prep_pack import generate_slices +from prep_pack import create_folds +from prep_pack import generate_patches + + +def main(args): + + if args.genslice: + dataset_path = args.datasetpath + save_path = args.savepath + masktype = args.masktype + if masktype == 'nodule': + generate_slices.extract_slices(dataset_path,save_path,masktype) + else: + generate_slices.generate_lungseg(dataset_path,save_path) + + elif args.createfolds: + data_path = args.datapath + save_path = args.savepath + dataset_path = args.datasetpath + + create_folds.positive_negative_classifier(data_path,save_path) + dict_subset = create_folds.subset_classifier(dataset_path,save_path) + create_folds.assign_folds(dict_subset,save_path) + if args.additional: + create_folds.create_balanced_dataset(save_path,data_path,additional=True) + else: + create_folds.create_balanced_dataset(save_path,data_path) + + + elif args.genpatch: + jsonpath = args.jsonpath + foldno = args.foldno + category = args.category + data_path = args.datapath + lungsegpath = args.lungsegpath + savepath = args.savepath + patchtype = args.patchtype + if patchtype == 'positive': + generate_patches.generate_patchlist(jsonpath,patchtype,foldno) + generate_patches.generate_positive_patch(jsonpath,foldno,data_path,savepath,category) + else: + generate_patches.generate_patchlist(jsonpath,patchtype,foldno) + generate_patches.generate_negative_patch(jsonpath,foldno,data_path,lungsegpath,savepath,category) + + + elif args.visualize: + seriesuid = args.seriesuid + slice_num = args.sliceno + data_path = args.datapath + savepath = args.savepath + + visualize.visualize_data(seriesuid,slice_num,data_path,savepath) + + else: + print('Arguments not passed. Use -h for help') + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='Select action to be performed') + + parser.add_argument('--genslice', default=False, action='store_true', + help='To create slices from 3D volume') + parser.add_argument('--createfolds', default=False, action='store_true', + help='Split dataset into 10 folds') + parser.add_argument('--genpatch', default=False, action='store_true', + help='To create patches from 2D slices') + parser.add_argument('--visualize', default=False, action='store_true', + help='Visualize any one of the slices') + + + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--masktype', + help='Type of mask to be generated. 
ie, nodule or lung') + parser.add_argument('--datasetpath', + help='Folder location of downloaded dataset') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--additional', default=False, action='store_true', + help='Add additional slices') + parser.add_argument('--category', + help='Category of data.[trainset,valset,testset]') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--datapath', + help='Folder location containing img and mask folders') + parser.add_argument('--lungsegpath', + help='Folder containing lung segmentation mask') + parser.add_argument('--patchtype', + help='positive or negative') + parser.add_argument('--sliceno', + help='Slice number to visualize') + parser.add_argument('--seriesuid', + help='Seriesuid of slice to visualize') + + args= parser.parse_args() + + main(args) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py new file mode 100644 index 00000000000..1c0fc1e8cb3 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# coding: utf-8 + +import torch +from torch.utils import data +import os +from torchvision import transforms +from PIL import Image +import numpy as np +from glob import glob +from natsort import natsorted + + + +class LungDataLoader(data.Dataset): + """Class represents the dataloader for Lung segmentation task + + Atributes + --------- + datapath: str + Folder location where img is stored + lung_path: str + Folder location where lung seg mask is stored + json_file: str + Folder location where json files are stored + split: str + String to determine train/val and test set + is_transform: Boolean + True if transformation is to be applied + img_size: int + Size of input image + + """ + def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= True,img_size= 512): + + self.split=split + self.path=path + self.lung_path=lung_path + self.json = json_file + self.files = self.json[self.split] + self.img_size= img_size + self.is_transform= is_transform + self.image_tf= transforms.Compose( + [transforms.Resize(self.img_size), + transforms.ToTensor() + ]) + + self.lung_tf = transforms.Compose( + [transforms.Resize(self.img_size), + transforms.ToTensor() + ]) + + + def __len__(self): + return len(self.files) + + + + def __getitem__(self,index): + + filename = self.files[index] + img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) + lung_mask = Image.fromarray(np.load(self.lung_path+filename).astype(float)) + + if self.is_transform: + img, lung_mask = self.transform(img,lung_mask) + labels = torch.cat((1.-lung_mask,lung_mask)) # + + return img, labels + + def transform(self,img,lung_mask): + img = self.image_tf(img) + img = img.type(torch.FloatTensor) + lung_mask = self.lung_tf(lung_mask) + lung_mask = lung_mask.type(torch.FloatTensor) + + + return img,lung_mask + + + + + +class LungPatchDataLoader(data.Dataset): + + def __init__(self,imgpath,split="train",is_transform= True): + + self.split = split + + self.imgpath = imgpath+self.split+'/img/' + self.is_transform = is_transform + self.files = os.listdir(self.imgpath) + + + def __len__(self): + return len(self.files) + + + + def __getitem__(self,index): + + filename = self.files[index] + l1 = int(filename.split('_')[1]) + if l1 == 1: # 
Complement operator ~ gave negative labels eg: for label 0 o/p was 1 + l2 = 0 + else: + l2 = 1 + label = torch.tensor([l1,l2]) + img = np.load(self.imgpath+filename) + size_dataset = len(os.listdir(self.imgpath)) + + + + if self.is_transform: + img= self.transform(img) + + return img,label + + + + def transform(self,img): + img = torch.Tensor(img).unsqueeze(0) + img = img.type(torch.FloatTensor) + + + return img + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py new file mode 100644 index 00000000000..3a10ad79bba --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# coding: utf-8 + +import numpy as np +import torch +import torch.nn as nn +from torch import optim +from tqdm import tqdm as tq +import time +from torch.utils import data +import os +import torch.nn.functional as F +from torch.autograd import Variable +import matplotlib.pyplot as plt +plt.switch_backend('agg') +from .sumnet_bn_vgg import SUMNet +from .r2unet import R2U_Net +from .r2unet import U_Net +from torchvision import transforms +import json +from PIL import Image +from .dataloader import LungDataLoader +from .utils import dice_coefficient + + +def infer_lungseg(fold_no,savepath,network,jsonpath): + """ Inference script for lung segmentation + + Parameters + ---------- + fold_no: int + Fold number to which action is to be performed + savepath: str + Folder location to save the results + network: str + Network name + jsonpath: + Folder location where file is to be stored + + Returns + ------- + None + + """ + + fold = 'fold'+str(fold_no) + + savePath = savepath+network+'/'+fold+'/' + if not os.path.isdir(savePath): + os.makedirs(savePath) + + with open(jsonpath+fold+'_pos_neg_eq.json') as f: + json_file = json.load(f) + test_set = json_file['test_set'] + + testDset = LungDataLoader(is_transform=True,json_file=json_file,split="test_set",img_size=512) + testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) + + testBatches = 0 + testDice_lungs = 0 + + if network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + elif network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + else: + net = R2U_Net(img_ch=1,output_ch=2) + + + dice_list = [] + use_gpu = torch.cuda.is_available() + + if use_gpu: + net = net.cuda() + + net.load_state_dict(torch.load(savePath+network+'_best_lungs.pt')) + + for data1 in tq(testDataLoader): + + imgs, mask = data1 + labels = mask + if use_gpu: + inputs = imgs.cuda() + labels = labels.cuda() + + net_out = net(Variable(inputs)) + net_out_sf = F.softmax(net_out.data,dim=1) + + test_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + + pred_max = torch.argmax(net_out_sf, dim=1) + preds = torch.zeros(pred_max.shape) + preds[pred_max == 1] = 1 + + if not os.path.isdir(savePath+'seg_results/GT/'): + os.makedirs(savePath+'seg_results/GT/') + np.save(savePath+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + else: + np.save(savePath+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + + if not os.path.isdir(savePath+'seg_results/pred/'): + os.makedirs(savePath+'seg_results/pred/') + np.save(savePath+'seg_results/pred/image'+str(testBatches),preds.cpu()) + else: + np.save(savePath+'seg_results/pred/image'+str(testBatches),preds.cpu()) + + if not os.path.isdir(savePath+'seg_results/image/'): + 
os.makedirs(savePath+'seg_results/image/') + np.save(savePath+'seg_results/image/image'+str(testBatches),inputs.cpu()) + else: + np.save(savePath+'seg_results/image/image'+str(testBatches),inputs.cpu()) + + testDice_lungs += test_dice[0] + dice_list.append(test_dice[0].cpu()) + testBatches += 1 + # if testBatches>1: + # break + + dice = np.mean(dice_list) + print("Result:",fold,dice) + + + #Plots distribution of min values per volume + plt.figure() + plt.title('Distribution of Dice values') + plt.hist(dice_list) + plt.xlabel('Dice score') + plt.ylabel('No. of Slices') + plt.savefig(savePath+'dice_dist.jpg') + # plt.show() + plt.close() + + + +def visualise_seg(loadpath): + """ + To visualise the segmentation performance(Qualitative results) + + Parameters + ---------- + + loadpath: str + Folder location from where the files are to be loaded + + Returns + ------- + None + + """ + + image_list = os.listdir(loadpath+'GT/') + count = 0 + for i in tq(image_list): + img = np.load(loadpath+'image/'+i) + GT = np.load(loadpath+'GT/'+i) + pred = np.load(loadpath+'pred/'+i) + + plt.figure(figsize = [15,5]) + plt.subplot(141) + plt.axis('off') + plt.title('Input Image') + plt.imshow(img[0][0],cmap = 'gray') + plt.subplot(142) + plt.axis('off') + plt.title('GT') + plt.imshow(GT[0],cmap = 'gray') + plt.subplot(143) + plt.axis('off') + plt.title('Pred') + plt.imshow(pred[0],cmap = 'gray') + plt.subplot(144) + plt.title('GT - Pred') + plt.axis('off') + test = GT[0]-pred[0] + test[test>0] = 1 + test[test<=0] = 0 + plt.imshow(test,cmap = 'gray') + count += 1 + + if not os.path.isdir(loadpath+'seg_results/op_images/'): + os.makedirs(loadpath+'seg_results/op_images/') + plt.savefig(loadpath+'seg_results/op_images/img'+str(count)+'.jpg') + else: + plt.savefig(loadpath+'seg_results/op_images/img'+str(count)+'.jpg') + + # if count>10: + # break + + + + + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py new file mode 100644 index 00000000000..573fcb8d292 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# coding: utf-8 + + +from __future__ import print_function, division + +import torch +import torch.nn as nn +import torch.optim as optim +from torch.optim import lr_scheduler +from torch.utils import data +from torchvision import transforms +import torchvision +from torchvision import datasets, models, transforms +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import matplotlib.pyplot as plt +import time +import os +import copy +from PIL import Image +from tqdm import tqdm_notebook as tq +from sklearn.metrics import confusion_matrix +from dataloader import LungPatchDataLoader +from lenet import LeNet + +def infer_classifier(modelpath,imgpath): + + testDset = LungPatchDataLoader(imgpath,is_transform=True,split="test") + testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True) + classification_model_loadPath = modelpath + net = LeNet() + + use_gpu = torch.cuda.is_available() + + if use_gpu: + net = net.cuda() + net.load_state_dict(torch.load(classification_model_loadPath+'lenet_best.pt')) + + optimizer = optim.Adam(net.parameters(), lr = 1e-4, weight_decay = 1e-5) + criterion = nn.BCEWithLogitsLoss() + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, 
patience=5, verbose=True) + testRunningCorrects = 0 + testRunningLoss = 0 + testBatches = 0 + pred_arr = [] + label_arr = [] + for data1 in tq(testDataLoader): + img, label = data1 + if use_gpu: + inputs = img.cuda() + label = label.float() + label = label.cuda() + + net_out = net(Variable(inputs)) + + net_loss = criterion(net_out,label) + preds = torch.zeros(net_out.shape).cuda() + preds[net_out > 0.5] = 1 + preds[net_out <= 0.5] = 0 + + testRunningLoss += net_loss.item() + testRunningCorrects += torch.sum(preds == label.data.float()) + + for i,j in zip(preds.cpu().numpy(),label.cpu().numpy()): + pred_arr.append(i) + label_arr.append(j) + + testBatches += 1 + # if testBatches>0: + # break + + + + testepoch_loss = testRunningLoss/testBatches + testepoch_acc = 100*(int(testRunningCorrects)/len(pred_arr)) + + print(' Loss: {:.4f} | accuracy: {:.4f} '.format( + testepoch_loss,testepoch_acc)) + + + tn, fp, fn, tp = confusion_matrix(np.array(label_arr).flatten(), np.array(pred_arr).flatten()).ravel() + + print('True Negative :',tn) + print('false Negative :',fn) + print('True positive :',tp) + print('False positive :',fp) + specificity = tn/(tn+fp) + sensitivity = tp/(tp+fn) + print('Specificity :',specificity) + print('Sensitivity :',sensitivity) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py new file mode 100644 index 00000000000..ae265121d9a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py @@ -0,0 +1,36 @@ + + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LeNet(nn.Module): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = nn.Conv2d(1, 6, kernel_size=5) + self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv2 = nn.Conv2d(6, 16, kernel_size=5) + self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3 = nn.Conv2d(16, 16, kernel_size=5) + self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3_drop = nn.Dropout2d() + self.fc1 = nn.Linear(256, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 2) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = self.pool1(x) + x = F.relu(self.conv2(x)) + x = self.pool2(x) + x = F.relu(self.conv3_drop(self.conv3(x))) + x = self.pool3(x) + x = x.view(-1, 256) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = F.relu(self.fc2(x)) + x = F.dropout(x, training=self.training) + x = self.fc3(x) + return torch.sigmoid(x) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py new file mode 100644 index 00000000000..88164e15411 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py @@ -0,0 +1,425 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import init + +def init_weights(net, init_type='normal', gain=0.02): + def init_func(m): + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=gain) + 
else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: + init.normal_(m.weight.data, 1.0, gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) + +class conv_block(nn.Module): + def __init__(self,ch_in,ch_out): + super(conv_block,self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True), + nn.Conv2d(ch_out, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + + def forward(self,x): + x = self.conv(x) + return x + +class up_conv(nn.Module): + def __init__(self,ch_in,ch_out): + super(up_conv,self).__init__() + self.up = nn.Sequential( + nn.Upsample(scale_factor=2), + nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + x = self.up(x) + return x + +class Recurrent_block(nn.Module): + def __init__(self,ch_out,t=2): + super(Recurrent_block,self).__init__() + self.t = t + self.ch_out = ch_out + self.conv = nn.Sequential( + nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + for i in range(self.t): + + if i==0: + x1 = self.conv(x) + + x1 = self.conv(x+x1) + return x1 + +class RRCNN_block(nn.Module): + def __init__(self,ch_in,ch_out,t=2): + super(RRCNN_block,self).__init__() + self.RCNN = nn.Sequential( + Recurrent_block(ch_out,t=t), + Recurrent_block(ch_out,t=t) + ) + self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0) + + def forward(self,x): + x = self.Conv_1x1(x) + x1 = self.RCNN(x) + return x+x1 + + +class single_conv(nn.Module): + def __init__(self,ch_in,ch_out): + super(single_conv,self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + x = self.conv(x) + return x + +class Attention_block(nn.Module): + def __init__(self,F_g,F_l,F_int): + super(Attention_block,self).__init__() + self.W_g = nn.Sequential( + nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(F_int) + ) + + self.W_x = nn.Sequential( + nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(F_int) + ) + + self.psi = nn.Sequential( + nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(1), + nn.Sigmoid() + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self,g,x): + g1 = self.W_g(g) + x1 = self.W_x(x) + psi = self.relu(g1+x1) + psi = self.psi(psi) + + return x*psi + + +class U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1): + super(U_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + + self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) + self.Conv2 = conv_block(ch_in=64,ch_out=128) + self.Conv3 = conv_block(ch_in=128,ch_out=256) + self.Conv4 = conv_block(ch_in=256,ch_out=512) + self.Conv5 = conv_block(ch_in=512,ch_out=1024) + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_conv4 = conv_block(ch_in=512, ch_out=256) + + self.Up3 = 
up_conv(ch_in=256,ch_out=128) + self.Up_conv3 = conv_block(ch_in=256, ch_out=128) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Up_conv2 = conv_block(ch_in=128, ch_out=64) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.Conv1(x) + + x2 = self.Maxpool(x1) + x2 = self.Conv2(x2) + + x3 = self.Maxpool(x2) + x3 = self.Conv3(x3) + + x4 = self.Maxpool(x3) + x4 = self.Conv4(x4) + + x5 = self.Maxpool(x4) + x5 = self.Conv5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + + d5 = self.Up_conv5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_conv4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_conv3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_conv2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + +class R2U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1,t=2): + super(R2U_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Upsample = nn.Upsample(scale_factor=2) + + self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) + + self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) + + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) + + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) + + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) + + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.RRCNN1(x) + + x2 = self.Maxpool(x1) + x2 = self.RRCNN2(x2) + + x3 = self.Maxpool(x2) + x3 = self.RRCNN3(x3) + + x4 = self.Maxpool(x3) + x4 = self.RRCNN4(x4) + + x5 = self.Maxpool(x4) + x5 = self.RRCNN5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_RRCNN5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_RRCNN4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_RRCNN3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_RRCNN2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + + +class AttU_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1): + super(AttU_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + + self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) + self.Conv2 = conv_block(ch_in=64,ch_out=128) + self.Conv3 = conv_block(ch_in=128,ch_out=256) + self.Conv4 = conv_block(ch_in=256,ch_out=512) + self.Conv5 = conv_block(ch_in=512,ch_out=1024) + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) + self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) + self.Up_conv4 = conv_block(ch_in=512, ch_out=256) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) + self.Up_conv3 = conv_block(ch_in=256, ch_out=128) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) + self.Up_conv2 = conv_block(ch_in=128, 
ch_out=64) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.Conv1(x) + + x2 = self.Maxpool(x1) + x2 = self.Conv2(x2) + + x3 = self.Maxpool(x2) + x3 = self.Conv3(x3) + + x4 = self.Maxpool(x3) + x4 = self.Conv4(x4) + + x5 = self.Maxpool(x4) + x5 = self.Conv5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + x4 = self.Att5(g=d5,x=x4) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_conv5(d5) + + d4 = self.Up4(d5) + x3 = self.Att4(g=d4,x=x3) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_conv4(d4) + + d3 = self.Up3(d4) + x2 = self.Att3(g=d3,x=x2) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_conv3(d3) + + d2 = self.Up2(d3) + x1 = self.Att2(g=d2,x=x1) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_conv2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + +class R2AttU_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1,t=2): + super(R2AttU_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Upsample = nn.Upsample(scale_factor=2) + + self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) + + self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) + + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) + + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) + + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) + + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) + self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) + self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) + self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) + self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.RRCNN1(x) + + x2 = self.Maxpool(x1) + x2 = self.RRCNN2(x2) + + x3 = self.Maxpool(x2) + x3 = self.RRCNN3(x3) + + x4 = self.Maxpool(x3) + x4 = self.RRCNN4(x4) + + x5 = self.Maxpool(x4) + x5 = self.RRCNN5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + x4 = self.Att5(g=d5,x=x4) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_RRCNN5(d5) + + d4 = self.Up4(d5) + x3 = self.Att4(g=d4,x=x3) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_RRCNN4(d4) + + d3 = self.Up3(d4) + x2 = self.Att3(g=d3,x=x2) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_RRCNN3(d3) + + d2 = self.Up2(d3) + x1 = self.Att2(g=d2,x=x1) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_RRCNN2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py new file mode 100644 index 00000000000..e2fa72290b2 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Nov 6 22:57:38 2018 + +@author: sumanthnandamuri +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import models + +class SUMNet(nn.Module): + def __init__(self,in_ch,out_ch): + super(SUMNet, self).__init__() + + self.encoder = models.vgg11_bn(pretrained = 
True).features + self.preconv = nn.Conv2d(in_ch, 3, 1) + self.conv1 = self.encoder[0] + self.bn1 = self.encoder[1] + self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv2 = self.encoder[4] + self.bn2 = self.encoder[5] + self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv3a = self.encoder[8] + self.bn3 = self.encoder[9] + self.conv3b = self.encoder[11] + self.bn4 = self.encoder[12] + self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv4a = self.encoder[15] + self.bn5 = self.encoder[16] + self.conv4b = self.encoder[18] + self.bn6 = self.encoder[19] + self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv5a = self.encoder[22] + self.bn7 = self.encoder[23] + self.conv5b = self.encoder[25] + self.bn8 = self.encoder[26] + self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) + + self.unpool5 = nn.MaxUnpool2d(2, 2) + self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) + self.unpool4 = nn.MaxUnpool2d(2, 2) + self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) + self.unpool3 = nn.MaxUnpool2d(2, 2) + self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) + self.donv3a = nn.Conv2d(256,128, 3, padding = 1) + self.unpool2 = nn.MaxUnpool2d(2, 2) + self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) + self.unpool1 = nn.MaxUnpool2d(2, 2) + self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) + self.output = nn.Conv2d(32, out_ch, 1) + + def forward(self, x): + preconv = F.relu(self.preconv(x), inplace = True) + conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) + pool1, idxs1 = self.pool1(conv1) + conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) + pool2, idxs2 = self.pool2(conv2) + conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) + conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) + pool3, idxs3 = self.pool3(conv3b) + conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) + conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) + pool4, idxs4 = self.pool4(conv4b) + conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) + conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) + pool5, idxs5 = self.pool5(conv5b) + + unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) + donv5b = F.relu(self.donv5b(unpool5), inplace = True) + donv5a = F.relu(self.donv5a(donv5b), inplace = True) + unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) + donv4b = F.relu(self.donv4b(unpool4), inplace = True) + donv4a = F.relu(self.donv4a(donv4b), inplace = True) + unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) + donv3b = F.relu(self.donv3b(unpool3), inplace = True) + donv3a = F.relu(self.donv3a(donv3b)) + unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) + donv2 = F.relu(self.donv2(unpool2), inplace = True) + unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) + donv1 = F.relu(self.donv1(unpool1), inplace = True) + output = self.output(donv1) + return output \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py new file mode 100644 index 00000000000..3f89a37a4fd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py @@ -0,0 +1,28 @@ +import numpy as np +import torch +from torch.utils import data + +def dice_coefficient(pred1, target): + smooth = 1e-15 + pred = 
torch.argmax(pred1,dim=1) + num = pred.size()[0] + pred_1_hot = torch.eye(3)[pred.squeeze(1)].cuda() + pred_1_hot = pred_1_hot.permute(0, 3, 1, 2).float() + + target_1_hot = torch.eye(3)[target].cuda() + target_1_hot = target_1_hot.permute(0,3, 1, 2).float() + + m1_1 = pred_1_hot[:,1,:,:].view(num, -1).float() + m2_1 = target_1_hot[:,1,:,:].view(num, -1).float() + m1_2 = pred_1_hot[:,2,:,:].view(num, -1).float() + m2_2 = target_1_hot[:,2,:,:].view(num, -1).float() + + intersection_1 = (m1_1*m2_1).sum(1) + intersection_2 = (m1_2*m2_2).sum(1) + union_1 = (m1_1+m2_1).sum(1) + smooth - intersection_1 + union_2 = (m1_2+m2_2).sum(1) + smooth - intersection_2 + score_1 = intersection_1/union_1 + score_2 = intersection_2/union_2 + + return [score_1.mean()] + \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py new file mode 100644 index 00000000000..dc4288dfd2c --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py @@ -0,0 +1,42 @@ +from infer_pack import infer_lung_seg +from infer_pack import infer_patch_classifier +import argparse + +def main(args): + + if args.lungseg: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + network = args.network + infer_lung_seg.infer_lungseg(foldno,savepath,network,jsonpath) + else: + savepath = args.savepath + imgpath = args.imgpath + patch_classifier.lungpatch_classifier(savepath,imgpath) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='Select action to be performed') + + parser.add_argument('--lungseg', default=False, action='store_true', + help='To test lung segmentation') + parser.add_argument('--patchclass', default=False, action='store_true', + help='test network to classify patch') + + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--imgpath', + help='Folder location where test images are stored') + parser.add_argument('--network', + help='Network to be trained') + + + args= parser.parse_args() + + main(args) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py new file mode 100644 index 00000000000..c93741f206c --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py @@ -0,0 +1,71 @@ +from train_pack import lung_seg +from train_pack import lung_seg_adv +from train_pack import patch_classifier +import argparse + +def main(args): + + if args.lungseg: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + datapath = args.datapath + lungsegpath = args.lungmask + network = args.network + if args.epochs: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) + + else: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) + + elif args.lungsegadv: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + datapath = args.datapath + lungsegpath = args.lungmask + network = args.network + if args.epochs: + lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) + else: + 
lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network) + else: + savepath = args.savepath + imgpath = args.datapath + if args.epochs: + patch_classifier.lungpatch_classifier(savepath,imgpath,args.epochs) + else: + patch_classifier.lungpatch_classifier(savepath,imgpath) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description='Select action to be performed') + + parser.add_argument('--lungseg', default=False, action='store_true', + help='To Train lung segmentation') + parser.add_argument('--lungsegadv', default=False, action='store_true', + help='To train lung seg network adversarially') + parser.add_argument('--patchclass', default=False, action='store_true', + help='Train network to classify patch') + + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--datapath', + help='Folder location where img and masks are stored') + parser.add_argument('--lungmask', + help='Folder location where lung masks are stored') + + parser.add_argument('--network', + help='Network to be trained') + parser.add_argument('--epochs', + help='Number of epochs') + + + args= parser.parse_args() + + main(args) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py @@ -0,0 +1 @@ + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py new file mode 100644 index 00000000000..d98c9ff2285 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +# coding: utf-8 + +import torch +from torch.utils import data +import os +from torchvision import transforms +from PIL import Image +import numpy as np +from glob import glob +from natsort import natsorted + + + +class LungDataLoader(data.Dataset): + """Class represents the dataloader for Lung segmentation task + + ... 
+ + + Atributes + --------- + datapath: str + Folder location where img is stored + lung_path: str + Folder location where lung seg mask is stored + json_file: str + Folder location where json files are stored + split: str + String to determine train/val and test set + is_transform: Boolean + True if transformation is to be applied + img_size: int + Size of input image + + """ + def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= True,img_size= 512): + + self.split=split + self.path= datapath + self.lung_path=lung_path + self.json = json_file + self.files = self.json[self.split] + self.img_size= img_size + self.is_transform= is_transform + self.image_tf= transforms.Compose( + [transforms.Resize(self.img_size), + transforms.ToTensor() + ]) + + self.lung_tf = transforms.Compose( + [transforms.Resize(self.img_size), + transforms.ToTensor() + ]) + + + def __len__(self): + return len(self.files) + + + + def __getitem__(self,index): + + filename = self.files[index] + img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) + lung_mask = Image.fromarray(np.load(self.lung_path+filename).astype(float)) + + if self.is_transform: + img, lung_mask = self.transform(img,lung_mask) + labels = torch.cat((1.-lung_mask,lung_mask)) # + + return img, labels + + def transform(self,img,lung_mask): + img = self.image_tf(img) + img = img.type(torch.FloatTensor) + lung_mask = self.lung_tf(lung_mask) + lung_mask = lung_mask.type(torch.FloatTensor) + + + return img,lung_mask + + + + + +class LungPatchDataLoader(data.Dataset): + + def __init__(self,imgpath,split="train_set",is_transform= True): + + self.split = split + + self.imgpath = imgpath+self.split+'/img/' + self.is_transform = is_transform + self.files = os.listdir(self.imgpath) + + + def __len__(self): + return len(self.files) + + + + def __getitem__(self,index): + + filename = self.files[index] + l1 = int(filename.split('_')[1]) + if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 + l2 = 0 + else: + l2 = 1 + label = torch.tensor([l1,l2]) + img = np.load(self.imgpath+filename) + size_dataset = len(os.listdir(self.imgpath)) + + + + if self.is_transform: + img= self.transform(img) + + return img,label + + + + def transform(self,img): + img = torch.Tensor(img).unsqueeze(0) + img = img.type(torch.FloatTensor) + + + return img + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py new file mode 100644 index 00000000000..ae265121d9a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py @@ -0,0 +1,36 @@ + + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LeNet(nn.Module): + def __init__(self): + super(LeNet, self).__init__() + self.conv1 = nn.Conv2d(1, 6, kernel_size=5) + self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv2 = nn.Conv2d(6, 16, kernel_size=5) + self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3 = nn.Conv2d(16, 16, kernel_size=5) + self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3_drop = nn.Dropout2d() + self.fc1 = nn.Linear(256, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 2) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = self.pool1(x) + x = F.relu(self.conv2(x)) + x = self.pool2(x) + x = F.relu(self.conv3_drop(self.conv3(x))) + x = self.pool3(x) + x = x.view(-1, 256) + x = F.relu(self.fc1(x)) + x = 
F.dropout(x, training=self.training) + x = F.relu(self.fc2(x)) + x = F.dropout(x, training=self.training) + x = self.fc3(x) + return torch.sigmoid(x) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py new file mode 100644 index 00000000000..090f8bec646 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python +# coding: utf-8 + +import numpy as np +import torch +import torch.nn as nn +from torch import optim +from tqdm import tqdm as tq +import time +from torch.utils import data +import os +import torch.nn.functional as F +from torch.autograd import Variable +import matplotlib.pyplot as plt +plt.switch_backend('agg') +from .sumnet_bn_vgg import SUMNet +from .r2unet import R2U_Net +from .r2unet import U_Net +from torchvision import transforms +import json +from PIL import Image +from .data_loader import LungDataLoader +from .utils import dice_coefficient + +def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4): + """Training function for SUMNet,UNet,R2Unet + + Parameters + ---------- + fold_no: str + Fold number on which training is to be performed + save_path: str + Folder location to save the models and other plots + json_path: str + Folder location at which json files are stored + datapath: str + Folder location of data + lung_segpath: str + Folder location at which lung segmentation files are stored + network: str + Network to be trained + epochs: int, Default: 35 + Number of epochs for training + lrate: int, Default= 1e-4 + Learnig rate + + Returns + ------- + + None + """ + + fold = 'fold'+str(fold_no) + savePath = save_path+'/'+network+'/'+fold+'/' + if not os.path.isdir(savePath): + os.makedirs(savePath) + + with open(json_path+fold+'_pos_neg_eq.json') as f: + json_file = json.load(f) + train_set = json_file['train_set'] + val_set = json_file['valid_set'] + + + trainDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="train_set",img_size=512) + valDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) + trainDataLoader = data.DataLoader(trainDset,batch_size=4,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) + validDataLoader = data.DataLoader(valDset,batch_size=4,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) + + if network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + if network == 'r2unet': + net = R2U_Net(img_ch=1,output_ch=2) + if network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + + use_gpu = torch.cuda.is_available() + + if use_gpu: + net = net.cuda() + + + optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + + criterion = nn.BCEWithLogitsLoss() + + epochs = epochs + trainLoss = [] + validLoss = [] + trainDiceCoeff_lungs = [] + validDiceCoeff_lungs = [] + start = time.time() + + bestValidDice = torch.zeros(1) + bestValidDice_lungs = 0.0 + + + for epoch in range(epochs): + epochStart = time.time() + trainRunningLoss = 0 + validRunningLoss = 0 + trainBatches = 0 + validBatches = 0 + trainDice_lungs = 0 + validDice_lungs = 0 + + + net.train(True) + + for data1 in tq(trainDataLoader): + img, mask = data1 + labels = mask + if use_gpu: + inputs = img.cuda() + labels = labels.cuda() + + net_out = net(Variable(inputs)) + + + net_out_sf = 
F.softmax(net_out,dim=1) + + BCE_Loss = criterion(net_out[:,1],labels[:,1]) + + net_loss = BCE_Loss + + optimizer.zero_grad() + + net_loss.backward() + + optimizer.step() + + trainRunningLoss += net_loss.item() + + trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + trainDice_lungs += trainDice[0] + + trainBatches += 1 + # if trainBatches>1: + # break + + trainLoss.append(trainRunningLoss/trainBatches) + trainDiceCoeff_lungs.append(trainDice_lungs/trainBatches) + + with torch.no_grad(): + for data1 in tq(validDataLoader): + + imgs, mask = data1 + labels = mask + if use_gpu: + inputs = imgs.cuda() + labels = labels.cuda() + + net_out = net(Variable(inputs)) + net_out_sf = F.softmax(net_out.data,dim=1) + + + BCE_Loss = criterion(net_out[:,1],labels[:,1]) + + net_loss = BCE_Loss + + + val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + validDice_lungs += val_dice[0] + validRunningLoss += net_loss.item() + validBatches += 1 + # if validBatches>1: + # break + + validLoss.append(validRunningLoss/validBatches) + validDiceCoeff_lungs.append(validDice_lungs/validBatches) + + + + if (validDice_lungs.cpu() > bestValidDice_lungs): + bestValidDice_lungs = validDice_lungs.cpu() + torch.save(net.state_dict(), savePath+'sumnet_best_lungs.pt') + + plot=plt.figure() + plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + if epoch==0: + plt.legend() + plt.savefig(savePath+'LossPlot.png') + plt.close() + epochEnd = time.time()-epochStart + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' + .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) + print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' + .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) + + print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) + trainLoss_np = np.array(trainLoss) + validLoss_np = np.array(validLoss) + trainDiceCoeff_lungs_np = np.array(trainDiceCoeff_lungs) + validDiceCoeff_lungs_np = np.array(validDiceCoeff_lungs) + + + print('Saving losses') + + torch.save(trainLoss_np, savePath+'trainLoss.pt') + torch.save(validLoss_np, savePath+'validLoss.pt') + torch.save(trainDiceCoeff_lungs_np, savePath+'trainDice_lungs.pt') + torch.save(validDiceCoeff_lungs_np, savePath+'validDice_lungs.pt') + + # if epoch>1: + # break + + end = time.time()-start + print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + + + plt.figure() + plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + plt.title('Loss plot') + plt.legend() + plt.savefig(savePath+'trainLossFinal.png') + plt.close() + + + plt.figure() + plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Lungs') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.title('Dice coefficient: Train') + plt.savefig(savePath+'trainDice.png') + plt.close() + + plt.figure() + plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Lungs') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.title('Dice coefficient: Valid') + plt.savefig(savePath+'validDice.png') + plt.close() + + plt.figure() + plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Train') + 
plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Valid') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.savefig(savePath+'Dice_final.png') + plt.close() + + + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py new file mode 100644 index 00000000000..b098335b48a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py @@ -0,0 +1,330 @@ +#!/usr/bin/env python +# coding: utf-8 + +import numpy as np +import torch +import torch.nn as nn +from torch import optim +from tqdm import tqdm as tq +import time +from torch.utils import data +import os +import torch.nn.functional as F +from torch.autograd import Variable +import matplotlib.pyplot as plt +plt.switch_backend('agg') +from .sumnet_bn_vgg import SUMNet +from .r2unet import U_Net +from .r2unet import R2U_Net +from torchvision import transforms +import json +from PIL import Image +from .utils import dice_coefficient +from .data_loader import LungDataLoader + +class Discriminator(nn.Module): + def __init__(self,in_ch, out_ch): + super(Discriminator, self).__init__() + self.main = nn.Sequential( + # input is (nc) x 64 x 64 + nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + # state size. (64) x 32 x 32 + nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + # state size. (64*2) x 16 x 16 + nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(2), + # state size. 
(64*4) x 8 x 8 + nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.Sigmoid() + ) + def forward(self, input): + output = self.main(input) + return output.view(-1, 2) #.squeeze(1) + +def ch_shuffle(x): + shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) + shuffIdx2 = 1-shuffIdx1 + d_in = torch.Tensor(x.size()).cuda() + d_in[:,shuffIdx1] = x[:,0] + d_in[:,shuffIdx2] = x[:,1] + shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) + return d_in, shuffLabel + +def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epochs=35,lrate=1e-4): + """Training function for SUMNet,UNet,R2Unet + + Parameters + ---------- + fold_no: str + Fold number on which training is to be performed + save_path: str + Folder location to save the models and other plots + json_path: str + Folder location at which json files are stored + datapath: str + Folder location of data + lung_segpath: str + Folder location at which lung segmentation files are stored + network: str + Network to be trained + epochs: int, Default: 35 + Number of epochs for training + lrate: int, Default= 1e-4 + Learnig rate + + Returns + ------- + + None + """ + + + fold = 'fold'+str(fold_no) + savePath = savepath+'/'+network+'/'+fold+'/' + if not os.path.isdir(savePath): + os.makedirs(savePath) + + with open(jsonpath+fold+'_pos_neg_eq.json') as f: + json_file = json.load(f) + train_set = json_file['train_set'] + val_set = json_file['valid_set'] + + + trainDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="train_set",img_size=512) + valDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) + trainDataLoader = data.DataLoader(trainDset,batch_size=8,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) + validDataLoader = data.DataLoader(valDset,batch_size=8,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) + + if network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + if network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + if network == 'r2unet': + net = R2U_Net(img_ch=1,output_ch=2) + + + netD2 = Discriminator(in_ch=2,out_ch=2) + + use_gpu = torch.cuda.is_available() + + if use_gpu: + net = net.cuda() + netD2 = netD2.cuda() + + + optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + optimizerD2 = optim.Adam(netD2.parameters(), lr = 1e-4, weight_decay = 1e-5) + + + criterion = nn.BCEWithLogitsLoss() + criterionD = nn.BCELoss() + + epochs = epochs + trainLoss = [] + validLoss = [] + D2_losses = [] + trainDiceCoeff_lungs = [] + validDiceCoeff_lungs = [] + start = time.time() + + bestValidDice = torch.zeros(1) + bestValidDice_lungs = 0.0 + + for epoch in range(epochs): + epochStart = time.time() + trainRunningLoss = 0 + validRunningLoss = 0 + trainBatches = 0 + validBatches = 0 + trainDice_lungs = 0 + validDice_lungs = 0 + + + net.train(True) + + for data1 in tq(trainDataLoader): + img, mask = data1 + labels = mask + if use_gpu: + inputs = img.cuda() + labels = labels.cuda() + + net_out = net(Variable(inputs)) + net_out_sf = F.softmax(net_out,dim=1) + + + ############################ + # DISCRIMINATOR 2 TRAINING # + ############################ + + optimizerD2.zero_grad() + # Concatenate real (GT) and fake (segmented) samples along dim 1 + d_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + # Shuffling aling dim 1: {real,fake} OR {fake,real} + 
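+            # Note: ch_shuffle (defined above) appears intended to randomly swap the
+            # prediction and ground-truth channels per sample and to return one-hot
+            # labels recording where the real mask ended up. netD2 is trained to
+            # recover that ordering; once the segmentation output becomes
+            # indistinguishable from the GT mask, the discriminator can do no better
+            # than chance. That confusion term (LGadv2 below) is what the Turing
+            # test loss rewards, entering the segmentation objective as
+            # net_loss = BCE_Loss - 0.001*LGadv2.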
d_in,shuffLabel = ch_shuffle(d_in) + # D2 prediction + confr = netD2(Variable(d_in)).view(d_in.size(0),-1) + # Compute loss + LD2 = criterionD(confr,shuffLabel.float().cuda()) + # Compute gradients + LD2.backward() + # Backpropagate + optimizerD2.step() + # Appending loss for each batch into the list + D2_losses.append(LD2.item()) + optimizerD2.zero_grad() + d2_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + d2_in, d2_lb = ch_shuffle(d2_in) + conffs2 = netD2(d2_in).view(d2_in.size(0),-1) + LGadv2 = criterionD(conffs2,d2_lb.float().cuda()) # Aversarial loss 2 + + + + BCE_Loss = criterion(net_out[:,1],labels[:,1]) + + net_loss = BCE_Loss - 0.001*LGadv2 + + optimizer.zero_grad() + + net_loss.backward() + + optimizer.step() + + trainRunningLoss += net_loss.item() + + trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + trainDice_lungs += trainDice[0] + + trainBatches += 1 + # if trainBatches>1: + # # break + + trainLoss.append(trainRunningLoss/trainBatches) + trainDiceCoeff_lungs.append(trainDice_lungs/trainBatches) + + print("\n{}][{}]| Net_loss: {:.4f} | BCE_Loss: {:.4f} |adv_loss: {:.4f}" + .format(epoch,epochs,net_loss.item(),BCE_Loss,LGadv2) ) + + with torch.no_grad(): + for data1 in tq(validDataLoader): + + imgs, mask = data1 + labels = mask + if use_gpu: + inputs = imgs.cuda() + labels = labels.cuda() + + net_out = net(Variable(inputs)) + net_out_sf = F.softmax(net_out.data,dim=1) + + + BCE_Loss = criterion(net_out[:,1],labels[:,1]) + + net_loss = BCE_Loss + + + val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + validDice_lungs += val_dice[0] + validRunningLoss += net_loss.item() + validBatches += 1 + # if validBatches>1: + # break + + validLoss.append(validRunningLoss/validBatches) + validDiceCoeff_lungs.append(validDice_lungs/validBatches) + + + + if (validDice_lungs.cpu() > bestValidDice_lungs): + bestValidDice_lungs = validDice_lungs.cpu() + torch.save(net.state_dict(), savePath+'sumnet_adv_best_lungs.pt') + + plot=plt.figure() + plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + if epoch==0: + plt.legend() + plt.savefig(savePath+'LossPlot.png') + plt.close() + epochEnd = time.time()-epochStart + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' + .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) + print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' + .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) + + print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) + trainLoss_np = np.array(trainLoss) + validLoss_np = np.array(validLoss) + trainDiceCoeff_lungs_np = np.array(trainDiceCoeff_lungs) + validDiceCoeff_lungs_np = np.array(validDiceCoeff_lungs) + + + print('Saving losses') + + torch.save(trainLoss_np, savePath+'trainLoss.pt') + torch.save(validLoss_np, savePath+'validLoss.pt') + torch.save(trainDiceCoeff_lungs_np, savePath+'trainDice_lungs.pt') + torch.save(validDiceCoeff_lungs_np, savePath+'validDice_lungs.pt') + + # if epoch>0: + # break + + end = time.time()-start + print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + + + plt.figure() + plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + plt.title('Loss plot') + plt.legend() + 
plt.savefig(savePath+'trainLossFinal.png') + plt.close() + + + plt.figure() + plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Lungs') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.title('Dice coefficient: Train') + plt.savefig(savePath+'trainDice.png') + plt.close() + + plt.figure() + plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Lungs') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.title('Dice coefficient: Valid') + plt.savefig(savePath+'validDice.png') + plt.close() + + plt.figure() + plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Train') + plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Valid') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Dice coefficient') + plt.savefig(savePath+'Dice_final.png') + plt.close() + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py new file mode 100644 index 00000000000..0b9e4b62c9c --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py @@ -0,0 +1,220 @@ + +#!/usr/bin/env python +# coding: utf-8 + +from __future__ import print_function, division + +import torch +import torch.nn as nn +import torch.optim as optim +from torch.optim import lr_scheduler +from torch.utils import data +from torchvision import transforms +import torchvision +from torchvision import datasets, models, transforms +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import matplotlib.pyplot as plt +import time +import os +import copy +from PIL import Image +from tqdm import tqdm_notebook as tq +from sklearn.metrics import confusion_matrix +from .data_loader import LungPatchDataLoader +from .lenet import LeNet + + +def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): + """Trains network to classify patches based on the presence of nodule + + Parameters + ---------- + savepath: str + Folder location to save the plots and model + imgpath: + Folder location where patch images are stored. 
+ lrate: int,Default = 1e-4 + Learning rate + epochs: int, default = 35 + Total epochs + + Returns + ------- + + None + """ + + trainDset = LungPatchDataLoader(imgpath=imgpath,is_transform=True,split="train_set") + valDset = LungPatchDataLoader(imgpath=imgpath,is_transform=True,split="valid_set") + trainDataLoader = data.DataLoader(trainDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) + validDataLoader = data.DataLoader(valDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) + + + savePath = savepath + if not os.path.isdir(savePath): + os.makedirs(savePath) + + trainDset = LungDataLoader(is_transform=True,split="train") + valDset = LungDataLoader(is_transform=True,split="valid") + trainDataLoader = data.DataLoader(trainDset,batch_size=32,shuffle=True,num_workers=4,pin_memory=True) + validDataLoader = data.DataLoader(valDset,batch_size=32,shuffle=False,num_workers=4,pin_memory=True) + + net = LeNet() + + use_gpu = torch.cuda.is_available() + if use_gpu: + net = net.cuda() + + optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + criterion = nn.BCEWithLogitsLoss() + scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=5, verbose=True) + + epochs = epochs + trainLoss = [] + validLoss = [] + trainAcc = [] + validAcc = [] + start = time.time() + bestValidAcc = 0.0 + + for epoch in range(epochs): + epochStart = time.time() + trainRunningLoss = 0 + validRunningLoss = 0 + trainRunningCorrects = 0 + validRunningCorrects = 0 + trainBatches = 0 + validBatches = 0 + + net.train(True) + + for data1 in tq(trainDataLoader): + img, label = data1 + if use_gpu: + inputs = img.cuda() + label = label.cuda() + + net_out = net(Variable(inputs)) + + net_loss = criterion(net_out,label.float()) + preds = torch.zeros(net_out.shape).cuda() + preds[net_out > 0.5] = 1 + preds[net_out <= 0.5] = 0 + + optimizer.zero_grad() + + net_loss.backward() + + optimizer.step() + + trainRunningLoss += net_loss.item() + for i in range(len(preds[:,0])): + if preds[:,0][i] == label[:,0][i].float(): + trainRunningCorrects += 1 + + trainBatches += 1 + # if trainBatches>1: + # break + + trainepoch_loss = trainRunningLoss/trainBatches + trainepoch_acc = 100*(int(trainRunningCorrects)/32594) + trainLoss.append(trainepoch_loss) + trainAcc.append(trainepoch_acc) + + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} |Train running : {:.5f}| Train acc: {:.5f} ' + .format(epoch+1, epochs, trainepoch_loss,trainRunningCorrects,trainepoch_acc)) + + with torch.no_grad(): + for data1 in tq(validDataLoader): + img, label = data1 + if use_gpu: + inputs = img.cuda() + label = label.float() + label = label.cuda() + + net_out = net(Variable(inputs)) + + net_loss = criterion(net_out,label) + preds = torch.zeros(net_out.shape).cuda() + preds[net_out > 0.5] = 1 + preds[net_out <= 0.5] = 0 + + validRunningLoss += net_loss.item() + for i in range(len(preds[:,0])): + if preds[:,0][i] == label[:,0][i].float(): + validRunningCorrects += 1 + + validBatches += 1 + # if validBatches>10: + # break + + validepoch_loss = validRunningLoss/validBatches + validepoch_acc = 100*(int(validRunningCorrects)/3666) + validLoss.append(validepoch_loss) + validAcc.append(validepoch_acc) + + print('{:.0f} Loss: {:.4f} | accuracy: {:.4f} '.format( + epoch, validepoch_loss,validepoch_acc)) + + if (validepoch_acc > bestValidAcc): + bestValidAcc = validepoch_acc + torch.save(net.state_dict(), savePath+'lenet_best.pt') + + scheduler.step(validepoch_loss) + + plot=plt.figure() + 
plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + if epoch==0: + plt.legend() + plt.savefig(savePath+'LossPlot.png') + plt.close() + epochEnd = time.time()-epochStart + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' + .format(epoch+1, epochs, trainepoch_loss, validepoch_loss)) + print('Accuracy | Train_acc {:.5f} | Valid_acc {:.5f} |' + .format(trainepoch_acc,validepoch_acc)) + + + print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) + trainLoss_np = np.array(trainLoss) + validLoss_np = np.array(validLoss) + trainAcc_np = np.array(trainAcc) + validAcc_np = np.array(validAcc) + + print('Saving losses') + + torch.save(trainLoss_np, savePath+'trainLoss.pt') + torch.save(validLoss_np, savePath+'validLoss.pt') + torch.save(trainAcc_np, savePath+'train_acc.pt') + torch.save(validAcc_np, savePath+'valid_acc.pt') + + # if epoch>1: + # break + + end = time.time()-start + print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + plt.figure() + plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') + plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') + plt.xlabel('Epochs') + plt.ylabel('Loss') + plt.title('Loss plot') + plt.legend() + plt.savefig(savePath+'trainLossFinal.png') + plt.close() + + + plt.figure() + plt.plot(range(len(trainAcc)),trainAcc,'-r',label='Train') + plt.plot(range(len(validAcc)),validAcc,'-g',label='Valid') + plt.legend() + plt.xlabel('Epochs') + plt.ylabel('Accuracy') + plt.title('Accuracy Plot') + plt.savefig(savePath+'acc_plot.png') + plt.close() \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py new file mode 100644 index 00000000000..88164e15411 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py @@ -0,0 +1,425 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import init + +def init_weights(net, init_type='normal', gain=0.02): + def init_func(m): + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: + init.normal_(m.weight.data, 1.0, gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) + +class conv_block(nn.Module): + def __init__(self,ch_in,ch_out): + super(conv_block,self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True), + nn.Conv2d(ch_out, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + + def forward(self,x): + x = self.conv(x) + return x + +class up_conv(nn.Module): + def __init__(self,ch_in,ch_out): + 
super(up_conv,self).__init__() + self.up = nn.Sequential( + nn.Upsample(scale_factor=2), + nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + x = self.up(x) + return x + +class Recurrent_block(nn.Module): + def __init__(self,ch_out,t=2): + super(Recurrent_block,self).__init__() + self.t = t + self.ch_out = ch_out + self.conv = nn.Sequential( + nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + for i in range(self.t): + + if i==0: + x1 = self.conv(x) + + x1 = self.conv(x+x1) + return x1 + +class RRCNN_block(nn.Module): + def __init__(self,ch_in,ch_out,t=2): + super(RRCNN_block,self).__init__() + self.RCNN = nn.Sequential( + Recurrent_block(ch_out,t=t), + Recurrent_block(ch_out,t=t) + ) + self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0) + + def forward(self,x): + x = self.Conv_1x1(x) + x1 = self.RCNN(x) + return x+x1 + + +class single_conv(nn.Module): + def __init__(self,ch_in,ch_out): + super(single_conv,self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + x = self.conv(x) + return x + +class Attention_block(nn.Module): + def __init__(self,F_g,F_l,F_int): + super(Attention_block,self).__init__() + self.W_g = nn.Sequential( + nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(F_int) + ) + + self.W_x = nn.Sequential( + nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(F_int) + ) + + self.psi = nn.Sequential( + nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True), + nn.BatchNorm2d(1), + nn.Sigmoid() + ) + + self.relu = nn.ReLU(inplace=True) + + def forward(self,g,x): + g1 = self.W_g(g) + x1 = self.W_x(x) + psi = self.relu(g1+x1) + psi = self.psi(psi) + + return x*psi + + +class U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1): + super(U_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + + self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) + self.Conv2 = conv_block(ch_in=64,ch_out=128) + self.Conv3 = conv_block(ch_in=128,ch_out=256) + self.Conv4 = conv_block(ch_in=256,ch_out=512) + self.Conv5 = conv_block(ch_in=512,ch_out=1024) + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_conv4 = conv_block(ch_in=512, ch_out=256) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Up_conv3 = conv_block(ch_in=256, ch_out=128) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Up_conv2 = conv_block(ch_in=128, ch_out=64) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.Conv1(x) + + x2 = self.Maxpool(x1) + x2 = self.Conv2(x2) + + x3 = self.Maxpool(x2) + x3 = self.Conv3(x3) + + x4 = self.Maxpool(x3) + x4 = self.Conv4(x4) + + x5 = self.Maxpool(x4) + x5 = self.Conv5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + + d5 = self.Up_conv5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_conv4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_conv3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_conv2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + 
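+
+# A minimal usage sketch (assumed, not part of the training pipeline in this
+# patch): every encoder-decoder in this file maps a (N, img_ch, H, W) input to
+# a (N, output_ch, H, W) logit map, and H and W must be divisible by 16 because
+# of the four 2x2 max-pooling stages. With the 1-channel 512x512 CT slices and
+# 2 output classes used elsewhere in this patch:
+#
+#     net = U_Net(img_ch=1, output_ch=2)
+#     logits = net(torch.randn(1, 1, 512, 512))   # -> shape (1, 2, 512, 512)
+#     probs = F.softmax(logits, dim=1)            # per-pixel class probabilities
+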
+class R2U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1,t=2): + super(R2U_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Upsample = nn.Upsample(scale_factor=2) + + self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) + + self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) + + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) + + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) + + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) + + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.RRCNN1(x) + + x2 = self.Maxpool(x1) + x2 = self.RRCNN2(x2) + + x3 = self.Maxpool(x2) + x3 = self.RRCNN3(x3) + + x4 = self.Maxpool(x3) + x4 = self.RRCNN4(x4) + + x5 = self.Maxpool(x4) + x5 = self.RRCNN5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_RRCNN5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_RRCNN4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_RRCNN3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_RRCNN2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + + +class AttU_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1): + super(AttU_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + + self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) + self.Conv2 = conv_block(ch_in=64,ch_out=128) + self.Conv3 = conv_block(ch_in=128,ch_out=256) + self.Conv4 = conv_block(ch_in=256,ch_out=512) + self.Conv5 = conv_block(ch_in=512,ch_out=1024) + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) + self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) + self.Up_conv4 = conv_block(ch_in=512, ch_out=256) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) + self.Up_conv3 = conv_block(ch_in=256, ch_out=128) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) + self.Up_conv2 = conv_block(ch_in=128, ch_out=64) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.Conv1(x) + + x2 = self.Maxpool(x1) + x2 = self.Conv2(x2) + + x3 = self.Maxpool(x2) + x3 = self.Conv3(x3) + + x4 = self.Maxpool(x3) + x4 = self.Conv4(x4) + + x5 = self.Maxpool(x4) + x5 = self.Conv5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + x4 = self.Att5(g=d5,x=x4) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_conv5(d5) + + d4 = self.Up4(d5) + x3 = self.Att4(g=d4,x=x3) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_conv4(d4) + + d3 = self.Up3(d4) + x2 = self.Att3(g=d3,x=x2) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_conv3(d3) + + d2 = self.Up2(d3) + x1 = self.Att2(g=d2,x=x1) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_conv2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + + +class R2AttU_Net(nn.Module): + def 
__init__(self,img_ch=3,output_ch=1,t=2): + super(R2AttU_Net,self).__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Upsample = nn.Upsample(scale_factor=2) + + self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) + + self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) + + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) + + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) + + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) + + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) + self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) + self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) + self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) + self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.RRCNN1(x) + + x2 = self.Maxpool(x1) + x2 = self.RRCNN2(x2) + + x3 = self.Maxpool(x2) + x3 = self.RRCNN3(x3) + + x4 = self.Maxpool(x3) + x4 = self.RRCNN4(x4) + + x5 = self.Maxpool(x4) + x5 = self.RRCNN5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + x4 = self.Att5(g=d5,x=x4) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_RRCNN5(d5) + + d4 = self.Up4(d5) + x3 = self.Att4(g=d4,x=x3) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_RRCNN4(d4) + + d3 = self.Up3(d4) + x2 = self.Att3(g=d3,x=x2) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_RRCNN3(d3) + + d2 = self.Up2(d3) + x1 = self.Att2(g=d2,x=x1) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_RRCNN2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py new file mode 100644 index 00000000000..e2fa72290b2 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Nov 6 22:57:38 2018 + +@author: sumanthnandamuri +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import models + +class SUMNet(nn.Module): + def __init__(self,in_ch,out_ch): + super(SUMNet, self).__init__() + + self.encoder = models.vgg11_bn(pretrained = True).features + self.preconv = nn.Conv2d(in_ch, 3, 1) + self.conv1 = self.encoder[0] + self.bn1 = self.encoder[1] + self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv2 = self.encoder[4] + self.bn2 = self.encoder[5] + self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv3a = self.encoder[8] + self.bn3 = self.encoder[9] + self.conv3b = self.encoder[11] + self.bn4 = self.encoder[12] + self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv4a = self.encoder[15] + self.bn5 = self.encoder[16] + self.conv4b = self.encoder[18] + self.bn6 = self.encoder[19] + self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv5a = self.encoder[22] + self.bn7 = self.encoder[23] + self.conv5b = self.encoder[25] + self.bn8 = self.encoder[26] + self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) + + self.unpool5 = nn.MaxUnpool2d(2, 2) + self.donv5b 
= nn.Conv2d(1024, 512, 3, padding = 1) + self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) + self.unpool4 = nn.MaxUnpool2d(2, 2) + self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) + self.unpool3 = nn.MaxUnpool2d(2, 2) + self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) + self.donv3a = nn.Conv2d(256,128, 3, padding = 1) + self.unpool2 = nn.MaxUnpool2d(2, 2) + self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) + self.unpool1 = nn.MaxUnpool2d(2, 2) + self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) + self.output = nn.Conv2d(32, out_ch, 1) + + def forward(self, x): + preconv = F.relu(self.preconv(x), inplace = True) + conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) + pool1, idxs1 = self.pool1(conv1) + conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) + pool2, idxs2 = self.pool2(conv2) + conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) + conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) + pool3, idxs3 = self.pool3(conv3b) + conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) + conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) + pool4, idxs4 = self.pool4(conv4b) + conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) + conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) + pool5, idxs5 = self.pool5(conv5b) + + unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) + donv5b = F.relu(self.donv5b(unpool5), inplace = True) + donv5a = F.relu(self.donv5a(donv5b), inplace = True) + unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) + donv4b = F.relu(self.donv4b(unpool4), inplace = True) + donv4a = F.relu(self.donv4a(donv4b), inplace = True) + unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) + donv3b = F.relu(self.donv3b(unpool3), inplace = True) + donv3a = F.relu(self.donv3a(donv3b)) + unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) + donv2 = F.relu(self.donv2(unpool2), inplace = True) + unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) + donv1 = F.relu(self.donv1(unpool1), inplace = True) + output = self.output(donv1) + return output \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py new file mode 100644 index 00000000000..3f89a37a4fd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py @@ -0,0 +1,28 @@ +import numpy as np +import torch +from torch.utils import data + +def dice_coefficient(pred1, target): + smooth = 1e-15 + pred = torch.argmax(pred1,dim=1) + num = pred.size()[0] + pred_1_hot = torch.eye(3)[pred.squeeze(1)].cuda() + pred_1_hot = pred_1_hot.permute(0, 3, 1, 2).float() + + target_1_hot = torch.eye(3)[target].cuda() + target_1_hot = target_1_hot.permute(0,3, 1, 2).float() + + m1_1 = pred_1_hot[:,1,:,:].view(num, -1).float() + m2_1 = target_1_hot[:,1,:,:].view(num, -1).float() + m1_2 = pred_1_hot[:,2,:,:].view(num, -1).float() + m2_2 = target_1_hot[:,2,:,:].view(num, -1).float() + + intersection_1 = (m1_1*m2_1).sum(1) + intersection_2 = (m1_2*m2_2).sum(1) + union_1 = (m1_1+m2_1).sum(1) + smooth - intersection_1 + union_2 = (m1_2+m2_2).sum(1) + smooth - intersection_2 + score_1 = intersection_1/union_1 + score_2 = intersection_2/union_2 + + return [score_1.mean()] + \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py 
b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py new file mode 100644 index 00000000000..e69de29bb2d From c83f842feae15217507c9b49af93673a93e44d33 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Fri, 30 Dec 2022 18:53:06 +0530 Subject: [PATCH 02/47] updates in readme --- .../lung_nodule_detection/ReadMe.md | 67 +-- .../src/data_prep/prep_pack/__init__.py | 0 .../src/{inference => }/inference.py | 7 +- .../src/inference/infer_pack/data_loader.py | 127 ------ .../src/{data_prep => }/prepare_data.py | 28 +- .../src/{training => }/train_network.py | 6 +- .../src/training/train_pack/__init__.py | 1 - .../src/training/train_pack/lenet.py | 36 -- .../src/training/train_pack/r2unet.py | 425 ------------------ .../src/training/train_pack/sumnet_bn_vgg.py | 86 ---- .../src/training/train_pack/utils.py | 28 -- .../prep_pack => utils}/create_folds.py | 0 .../train_pack => utils}/data_loader.py | 18 +- .../prep_pack => utils}/generate_patches.py | 0 .../prep_pack => utils}/generate_slices.py | 5 - .../infer_pack => utils}/infer_lung_seg.py | 0 .../infer_patch_classifier.py | 0 .../{inference/infer_pack => utils}/lenet.py | 0 .../train_pack => utils}/lung_seg.py | 0 .../train_pack => utils}/lung_seg_adv.py | 0 .../train_pack => utils}/patch_classifier.py | 0 .../{inference/infer_pack => utils}/r2unet.py | 0 .../infer_pack => utils}/sumnet_bn_vgg.py | 0 .../{inference/infer_pack => utils}/utils.py | 0 .../prep_pack => utils}/visualize.py | 0 25 files changed, 60 insertions(+), 774 deletions(-) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference => }/inference.py (88%) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py rename misc/pytorch_toolkit/lung_nodule_detection/src/{data_prep => }/prepare_data.py (85%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{training => }/train_network.py (95%) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py rename misc/pytorch_toolkit/lung_nodule_detection/src/{data_prep/prep_pack => utils}/create_folds.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{training/train_pack => utils}/data_loader.py (96%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{data_prep/prep_pack => utils}/generate_patches.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{data_prep/prep_pack => utils}/generate_slices.py (99%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/infer_lung_seg.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/infer_patch_classifier.py (100%) rename 
misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/lenet.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{training/train_pack => utils}/lung_seg.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{training/train_pack => utils}/lung_seg_adv.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{training/train_pack => utils}/patch_classifier.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/r2unet.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/sumnet_bn_vgg.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{inference/infer_pack => utils}/utils.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/{data_prep/prep_pack => utils}/visualize.py (100%) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md index 833bfd14fe1..c49dc158496 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md +++ b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md @@ -2,17 +2,21 @@ Lung cancer is the most common form of cancer found worldwide with a high mortality rate. Early detection of pulmonary nodules by screening with a low-dose computed tomography (CT) scan is crucial for its effective clinical management. Nodules which are symptomatic of malignancy occupy about 0.0125 - 0.025% of volume in a CT scan of a patient. Manual screening of all slices is a tedious task and presents a high risk of human errors. To tackle this problem we propose a computationally efficient two-stage framework. In the first stage, a convolutional neural network (CNN) trained adversarially using Turing test loss segments the lung region. In the second stage, patches sampled from the segmented region are then classified to detect the presence of nodules. The proposed method is experimentally validated on the LUNA16 challenge dataset with a dice coefficient of **0.984 ± 0.0007** for 10-fold cross-validation. -**Paper** : [arXiv](https://arxiv.org/abs/2006.09308v1) + +>**Paper** : [IEEE Xplore](https://ieeexplore.ieee.org/abstract/document/9175649) + BibTeX reference to cite, if you use it: ```bibtex -@inproceedings{Sathish2020LungSA, -title={Lung Segmentation and Nodule Detection in - Computed Tomography Scan using a Convolutional Neural Network - Trained Adversarially using Turing Test Loss}, - author={Rakshith Sathish and Rachana Sathish - and Ramanathan Sethuraman and Debdoot Sheet}, - year={2020} } +@INPROCEEDINGS{9175649, + author={Sathish, Rakshith and Sathish, Rachana and Sethuraman, Ramanathan and Sheet, Debdoot}, + booktitle={2020 42nd Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC)}, + title={Lung Segmentation and Nodule Detection in Computed Tomography Scan using a Convolutional Neural Network Trained Adversarially using Turing Test Loss}, + year={2020}, + volume={}, + number={}, + pages={1331-1334}, + doi={10.1109/EMBC44109.2020.9175649}} ``` ## Dataset used @@ -34,11 +38,10 @@ The ground truth annotations were marked in a two-phase image annotation process Code directory is organised into 3 subfolders; Data preparation, Training and Evaluation. Each of these subfolders has a .py file and a package folder containing function definitions. 
## Requirements -Create a conda virtual environment with +Create a virtual environment with all dependencies using ``` -conda create --name --file requirements.txt +sh init_venv.sh ``` -This would create a virtual environment with all the necessary packages of the same version used during development. ## Data preparation Follow the below steps to prepare and organise the data for training. @@ -86,29 +89,31 @@ To evaluate the classifier network execute ## Pre-trained Models ## Results -## Support -If you face any issues while executing the codes, raise an issue with rakshith.sathish@gmail.com -# Authors and Acknowledgment -- Rakshith Sathish -- Rachana Sathish -- Ramanathan Sethuraman -- Debdoot Sheet -> This work was supported through a research grant from Intel India Grand -Challenge 2016 for Project MIRIAD +## Acknowledgement + + This work is undertaken as part of Intel India Grand Challenge 2016 Project MIRIAD: Many Incarnations of Screening of Radiology for High Throughput Disease Screening via Multiple Instance Reinforcement Learning with Adversarial Deep Neural Networks, sponsored by Intel Technology India Pvt. Ltd., Bangalore, India. + +**Principal Investigators** + +Dr Debdoot Sheet, Dr Nirmalya Ghosh (Co-PI)
+Department of Electrical Engineering,
+Indian Institute of Technology Kharagpur
+email: debdoot@ee.iitkgp.ac.in, nirmalya@ee.iitkgp.ac.in + +Dr Ramanathan Sethuraman,
+Intel Technology India Pvt. Ltd.
+email: ramanathan.sethuraman@intel.com - # License -Copyright [2020] [IITKLIV] +**Contributor** -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +The codes/model was contributed to the OpenVINO project by - http://www.apache.org/licenses/LICENSE-2.0 + Rakshith Sathish,
+Advanced Technology Development Center,
+Indian Institute of Technology Kharagpur
+email: rakshith.sathish@kgpian.iitkgp.ac.in
+Github username: Rakshith2597 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +## References diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py similarity index 88% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/inference.py index dc4288dfd2c..fea91ba64f0 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py @@ -1,5 +1,5 @@ -from infer_pack import infer_lung_seg -from infer_pack import infer_patch_classifier +from utils import infer_lung_seg +from utils import infer_patch_classifier import argparse def main(args): @@ -13,7 +13,7 @@ def main(args): else: savepath = args.savepath imgpath = args.imgpath - patch_classifier.lungpatch_classifier(savepath,imgpath) + infer_patch_classifier.lungpatch_classifier(savepath,imgpath) if __name__ == '__main__': @@ -36,7 +36,6 @@ def main(args): parser.add_argument('--network', help='Network to be trained') - args= parser.parse_args() main(args) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py deleted file mode 100644 index 1c0fc1e8cb3..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/data_loader.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -import torch -from torch.utils import data -import os -from torchvision import transforms -from PIL import Image -import numpy as np -from glob import glob -from natsort import natsorted - - - -class LungDataLoader(data.Dataset): - """Class represents the dataloader for Lung segmentation task - - Atributes - --------- - datapath: str - Folder location where img is stored - lung_path: str - Folder location where lung seg mask is stored - json_file: str - Folder location where json files are stored - split: str - String to determine train/val and test set - is_transform: Boolean - True if transformation is to be applied - img_size: int - Size of input image - - """ - def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= True,img_size= 512): - - self.split=split - self.path=path - self.lung_path=lung_path - self.json = json_file - self.files = self.json[self.split] - self.img_size= img_size - self.is_transform= is_transform - self.image_tf= transforms.Compose( - [transforms.Resize(self.img_size), - transforms.ToTensor() - ]) - - self.lung_tf = transforms.Compose( - [transforms.Resize(self.img_size), - transforms.ToTensor() - ]) - - - def __len__(self): - return len(self.files) - - - - def __getitem__(self,index): - - filename = self.files[index] - img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) - lung_mask = Image.fromarray(np.load(self.lung_path+filename).astype(float)) - - if 
self.is_transform: - img, lung_mask = self.transform(img,lung_mask) - labels = torch.cat((1.-lung_mask,lung_mask)) # - - return img, labels - - def transform(self,img,lung_mask): - img = self.image_tf(img) - img = img.type(torch.FloatTensor) - lung_mask = self.lung_tf(lung_mask) - lung_mask = lung_mask.type(torch.FloatTensor) - - - return img,lung_mask - - - - - -class LungPatchDataLoader(data.Dataset): - - def __init__(self,imgpath,split="train",is_transform= True): - - self.split = split - - self.imgpath = imgpath+self.split+'/img/' - self.is_transform = is_transform - self.files = os.listdir(self.imgpath) - - - def __len__(self): - return len(self.files) - - - - def __getitem__(self,index): - - filename = self.files[index] - l1 = int(filename.split('_')[1]) - if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 - l2 = 0 - else: - l2 = 1 - label = torch.tensor([l1,l2]) - img = np.load(self.imgpath+filename) - size_dataset = len(os.listdir(self.imgpath)) - - - - if self.is_transform: - img= self.transform(img) - - return img,label - - - - def transform(self,img): - img = torch.Tensor(img).unsqueeze(0) - img = img.type(torch.FloatTensor) - - - return img - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py similarity index 85% rename from misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py index 671044c0db4..471d2611b1e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prepare_data.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py @@ -1,8 +1,8 @@ import argparse -from prep_pack import visualize -from prep_pack import generate_slices -from prep_pack import create_folds -from prep_pack import generate_patches +from utils import visualize +from utils import generate_slices +from utils import create_folds +from utils import generate_patches def main(args): @@ -65,38 +65,38 @@ def main(args): parser.add_argument('--genslice', default=False, action='store_true', help='To create slices from 3D volume') parser.add_argument('--createfolds', default=False, action='store_true', - help='Split dataset into 10 folds') + help='Split dataset into 10 folds') parser.add_argument('--genpatch', default=False, action='store_true', help='To create patches from 2D slices') parser.add_argument('--visualize', default=False, action='store_true', - help='Visualize any one of the slices') - - + help='Visualize any one of the slices') parser.add_argument('--savepath', help='Folder location to save the files') parser.add_argument('--masktype', help='Type of mask to be generated. 
ie, nodule or lung') parser.add_argument('--datasetpath', - help='Folder location of downloaded dataset') + help='Folder location of downloaded dataset') parser.add_argument('--foldno', help='Fold number') parser.add_argument('--additional', default=False, action='store_true', - help='Add additional slices') + help='Add additional slices') parser.add_argument('--category', - help='Category of data.[trainset,valset,testset]') + help='Category of data.[trainset,valset,testset]') parser.add_argument('--jsonpath', help='Folder location where jsons are stored') parser.add_argument('--datapath', help='Folder location containing img and mask folders') - parser.add_argument('--lungsegpath', + parser.add_argument('--lungsegpath', help='Folder containing lung segmentation mask') - parser.add_argument('--patchtype', + parser.add_argument('--patchtype', help='positive or negative') parser.add_argument('--sliceno', help='Slice number to visualize') parser.add_argument('--seriesuid', help='Seriesuid of slice to visualize') - args= parser.parse_args() + args=parser.parse_args() + + main(args) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py similarity index 95% rename from misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py index c93741f206c..5942073125d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_network.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py @@ -1,6 +1,6 @@ -from train_pack import lung_seg -from train_pack import lung_seg_adv -from train_pack import patch_classifier +from utils import lung_seg +from utils import lung_seg_adv +from utils import patch_classifier import argparse def main(args): diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py deleted file mode 100644 index 8b137891791..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py deleted file mode 100644 index ae265121d9a..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lenet.py +++ /dev/null @@ -1,36 +0,0 @@ - - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class LeNet(nn.Module): - def __init__(self): - super(LeNet, self).__init__() - self.conv1 = nn.Conv2d(1, 6, kernel_size=5) - self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv2 = nn.Conv2d(6, 16, kernel_size=5) - self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv3 = nn.Conv2d(16, 16, kernel_size=5) - self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv3_drop = nn.Dropout2d() - self.fc1 = nn.Linear(256, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 2) - - def forward(self, x): - x = F.relu(self.conv1(x)) - x = self.pool1(x) - x = F.relu(self.conv2(x)) - x = self.pool2(x) - x = F.relu(self.conv3_drop(self.conv3(x))) - x = self.pool3(x) - x = x.view(-1, 256) - x = F.relu(self.fc1(x)) - x = F.dropout(x, training=self.training) - x = F.relu(self.fc2(x)) - x = F.dropout(x, training=self.training) - x = self.fc3(x) - return 
torch.sigmoid(x) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py deleted file mode 100644 index 88164e15411..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/r2unet.py +++ /dev/null @@ -1,425 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn import init - -def init_weights(net, init_type='normal', gain=0.02): - def init_func(m): - classname = m.__class__.__name__ - if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'kaiming': - init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - init.orthogonal_(m.weight.data, gain=gain) - else: - raise NotImplementedError('initialization method [%s] is not implemented' % init_type) - if hasattr(m, 'bias') and m.bias is not None: - init.constant_(m.bias.data, 0.0) - elif classname.find('BatchNorm2d') != -1: - init.normal_(m.weight.data, 1.0, gain) - init.constant_(m.bias.data, 0.0) - - print('initialize network with %s' % init_type) - net.apply(init_func) - -class conv_block(nn.Module): - def __init__(self,ch_in,ch_out): - super(conv_block,self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True), - nn.Conv2d(ch_out, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - - def forward(self,x): - x = self.conv(x) - return x - -class up_conv(nn.Module): - def __init__(self,ch_in,ch_out): - super(up_conv,self).__init__() - self.up = nn.Sequential( - nn.Upsample(scale_factor=2), - nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - x = self.up(x) - return x - -class Recurrent_block(nn.Module): - def __init__(self,ch_out,t=2): - super(Recurrent_block,self).__init__() - self.t = t - self.ch_out = ch_out - self.conv = nn.Sequential( - nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - for i in range(self.t): - - if i==0: - x1 = self.conv(x) - - x1 = self.conv(x+x1) - return x1 - -class RRCNN_block(nn.Module): - def __init__(self,ch_in,ch_out,t=2): - super(RRCNN_block,self).__init__() - self.RCNN = nn.Sequential( - Recurrent_block(ch_out,t=t), - Recurrent_block(ch_out,t=t) - ) - self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0) - - def forward(self,x): - x = self.Conv_1x1(x) - x1 = self.RCNN(x) - return x+x1 - - -class single_conv(nn.Module): - def __init__(self,ch_in,ch_out): - super(single_conv,self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - x = self.conv(x) - return x - -class Attention_block(nn.Module): - def __init__(self,F_g,F_l,F_int): - super(Attention_block,self).__init__() - self.W_g = nn.Sequential( - nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True), - nn.BatchNorm2d(F_int) - ) - - self.W_x = nn.Sequential( - nn.Conv2d(F_l, F_int, 
kernel_size=1,stride=1,padding=0,bias=True), - nn.BatchNorm2d(F_int) - ) - - self.psi = nn.Sequential( - nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True), - nn.BatchNorm2d(1), - nn.Sigmoid() - ) - - self.relu = nn.ReLU(inplace=True) - - def forward(self,g,x): - g1 = self.W_g(g) - x1 = self.W_x(x) - psi = self.relu(g1+x1) - psi = self.psi(psi) - - return x*psi - - -class U_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1): - super(U_Net,self).__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - - self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) - self.Conv2 = conv_block(ch_in=64,ch_out=128) - self.Conv3 = conv_block(ch_in=128,ch_out=256) - self.Conv4 = conv_block(ch_in=256,ch_out=512) - self.Conv5 = conv_block(ch_in=512,ch_out=1024) - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Up_conv2 = conv_block(ch_in=128, ch_out=64) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.Conv1(x) - - x2 = self.Maxpool(x1) - x2 = self.Conv2(x2) - - x3 = self.Maxpool(x2) - x3 = self.Conv3(x3) - - x4 = self.Maxpool(x3) - x4 = self.Conv4(x4) - - x5 = self.Maxpool(x4) - x5 = self.Conv5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - d5 = torch.cat((x4,d5),dim=1) - - d5 = self.Up_conv5(d5) - - d4 = self.Up4(d5) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_conv4(d4) - - d3 = self.Up3(d4) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_conv3(d3) - - d2 = self.Up2(d3) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_conv2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - -class R2U_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1,t=2): - super(R2U_Net,self).__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - self.Upsample = nn.Upsample(scale_factor=2) - - self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) - - self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - - self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - - self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - - self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.RRCNN1(x) - - x2 = self.Maxpool(x1) - x2 = self.RRCNN2(x2) - - x3 = self.Maxpool(x2) - x3 = self.RRCNN3(x3) - - x4 = self.Maxpool(x3) - x4 = self.RRCNN4(x4) - - x5 = self.Maxpool(x4) - x5 = self.RRCNN5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_RRCNN5(d5) - - d4 = self.Up4(d5) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_RRCNN4(d4) - - d3 = self.Up3(d4) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_RRCNN3(d3) - - d2 = self.Up2(d3) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_RRCNN2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - - -class 
AttU_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1): - super(AttU_Net,self).__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - - self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) - self.Conv2 = conv_block(ch_in=64,ch_out=128) - self.Conv3 = conv_block(ch_in=128,ch_out=256) - self.Conv4 = conv_block(ch_in=256,ch_out=512) - self.Conv5 = conv_block(ch_in=512,ch_out=1024) - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) - self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) - self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) - self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) - self.Up_conv2 = conv_block(ch_in=128, ch_out=64) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.Conv1(x) - - x2 = self.Maxpool(x1) - x2 = self.Conv2(x2) - - x3 = self.Maxpool(x2) - x3 = self.Conv3(x3) - - x4 = self.Maxpool(x3) - x4 = self.Conv4(x4) - - x5 = self.Maxpool(x4) - x5 = self.Conv5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - x4 = self.Att5(g=d5,x=x4) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_conv5(d5) - - d4 = self.Up4(d5) - x3 = self.Att4(g=d4,x=x3) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_conv4(d4) - - d3 = self.Up3(d4) - x2 = self.Att3(g=d3,x=x2) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_conv3(d3) - - d2 = self.Up2(d3) - x1 = self.Att2(g=d2,x=x1) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_conv2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - -class R2AttU_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1,t=2): - super(R2AttU_Net,self).__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - self.Upsample = nn.Upsample(scale_factor=2) - - self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) - - self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - - self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - - self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - - self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) - self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) - self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) - self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) - self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.RRCNN1(x) - - x2 = self.Maxpool(x1) - x2 = self.RRCNN2(x2) - - x3 = self.Maxpool(x2) - x3 = self.RRCNN3(x3) - - x4 = self.Maxpool(x3) - x4 = self.RRCNN4(x4) - - x5 = self.Maxpool(x4) - x5 = self.RRCNN5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - x4 = self.Att5(g=d5,x=x4) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_RRCNN5(d5) - - d4 = self.Up4(d5) - x3 = self.Att4(g=d4,x=x3) - d4 = torch.cat((x3,d4),dim=1) - d4 = 
self.Up_RRCNN4(d4) - - d3 = self.Up3(d4) - x2 = self.Att3(g=d3,x=x2) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_RRCNN3(d3) - - d2 = self.Up2(d3) - x1 = self.Att2(g=d2,x=x1) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_RRCNN2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py deleted file mode 100644 index e2fa72290b2..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/sumnet_bn_vgg.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Nov 6 22:57:38 2018 - -@author: sumanthnandamuri -""" -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import models - -class SUMNet(nn.Module): - def __init__(self,in_ch,out_ch): - super(SUMNet, self).__init__() - - self.encoder = models.vgg11_bn(pretrained = True).features - self.preconv = nn.Conv2d(in_ch, 3, 1) - self.conv1 = self.encoder[0] - self.bn1 = self.encoder[1] - self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv2 = self.encoder[4] - self.bn2 = self.encoder[5] - self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv3a = self.encoder[8] - self.bn3 = self.encoder[9] - self.conv3b = self.encoder[11] - self.bn4 = self.encoder[12] - self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv4a = self.encoder[15] - self.bn5 = self.encoder[16] - self.conv4b = self.encoder[18] - self.bn6 = self.encoder[19] - self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv5a = self.encoder[22] - self.bn7 = self.encoder[23] - self.conv5b = self.encoder[25] - self.bn8 = self.encoder[26] - self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) - - self.unpool5 = nn.MaxUnpool2d(2, 2) - self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) - self.unpool4 = nn.MaxUnpool2d(2, 2) - self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) - self.unpool3 = nn.MaxUnpool2d(2, 2) - self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) - self.donv3a = nn.Conv2d(256,128, 3, padding = 1) - self.unpool2 = nn.MaxUnpool2d(2, 2) - self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) - self.unpool1 = nn.MaxUnpool2d(2, 2) - self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) - self.output = nn.Conv2d(32, out_ch, 1) - - def forward(self, x): - preconv = F.relu(self.preconv(x), inplace = True) - conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) - pool1, idxs1 = self.pool1(conv1) - conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) - pool2, idxs2 = self.pool2(conv2) - conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) - conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) - pool3, idxs3 = self.pool3(conv3b) - conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) - conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) - pool4, idxs4 = self.pool4(conv4b) - conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) - conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) - pool5, idxs5 = self.pool5(conv5b) - - unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) - donv5b = F.relu(self.donv5b(unpool5), inplace = True) - donv5a = F.relu(self.donv5a(donv5b), inplace = True) - unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) - donv4b = F.relu(self.donv4b(unpool4), inplace = True) - 
donv4a = F.relu(self.donv4a(donv4b), inplace = True) - unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) - donv3b = F.relu(self.donv3b(unpool3), inplace = True) - donv3a = F.relu(self.donv3a(donv3b)) - unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) - donv2 = F.relu(self.donv2(unpool2), inplace = True) - unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) - donv1 = F.relu(self.donv1(unpool1), inplace = True) - output = self.output(donv1) - return output \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py deleted file mode 100644 index 3f89a37a4fd..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/utils.py +++ /dev/null @@ -1,28 +0,0 @@ -import numpy as np -import torch -from torch.utils import data - -def dice_coefficient(pred1, target): - smooth = 1e-15 - pred = torch.argmax(pred1,dim=1) - num = pred.size()[0] - pred_1_hot = torch.eye(3)[pred.squeeze(1)].cuda() - pred_1_hot = pred_1_hot.permute(0, 3, 1, 2).float() - - target_1_hot = torch.eye(3)[target].cuda() - target_1_hot = target_1_hot.permute(0,3, 1, 2).float() - - m1_1 = pred_1_hot[:,1,:,:].view(num, -1).float() - m2_1 = target_1_hot[:,1,:,:].view(num, -1).float() - m1_2 = pred_1_hot[:,2,:,:].view(num, -1).float() - m2_2 = target_1_hot[:,2,:,:].view(num, -1).float() - - intersection_1 = (m1_1*m2_1).sum(1) - intersection_2 = (m1_2*m2_2).sum(1) - union_1 = (m1_1+m2_1).sum(1) + smooth - intersection_1 - union_2 = (m1_2+m2_2).sum(1) + smooth - intersection_2 - score_1 = intersection_1/union_1 - score_2 = intersection_2/union_2 - - return [score_1.mean()] - \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/create_folds.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py similarity index 96% rename from misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index d98c9ff2285..73695431ebd 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -15,9 +15,6 @@ class LungDataLoader(data.Dataset): """Class represents the dataloader for Lung segmentation task - ... 
- - Atributes --------- datapath: str @@ -52,12 +49,9 @@ def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= T [transforms.Resize(self.img_size), transforms.ToTensor() ]) - - + def __len__(self): - return len(self.files) - - + return len(self.files) def __getitem__(self,index): @@ -70,18 +64,14 @@ def __getitem__(self,index): labels = torch.cat((1.-lung_mask,lung_mask)) # return img, labels - + def transform(self,img,lung_mask): img = self.image_tf(img) img = img.type(torch.FloatTensor) lung_mask = self.lung_tf(lung_mask) lung_mask = lung_mask.type(torch.FloatTensor) - - - return img,lung_mask - - + return img,lung_mask class LungPatchDataLoader(data.Dataset): diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_patches.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py similarity index 99% rename from misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py index 8dee91c1716..17a118a125c 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/generate_slices.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py @@ -1,14 +1,9 @@ -#!/usr/bin/env python -# coding: utf-8 - - import SimpleITK as sitk import pylidc as pl import matplotlib.pyplot as plt import numpy as np from collections import defaultdict import os -import glob import cv2 from tqdm import tqdm as tq diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_lung_seg.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/infer_patch_classifier.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/lenet.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py similarity 
index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/lung_seg_adv.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/training/train_pack/patch_classifier.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/r2unet.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/sumnet_bn_vgg.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/inference/infer_pack/utils.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/data_prep/prep_pack/visualize.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py From 36f27fd9ab4be57e2b8b3135261470062a6e9457 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Tue, 3 Jan 2023 13:50:48 +0530 Subject: [PATCH 03/47] pylint fix 1 --- .../lung_nodule_detection/init_venv.sh | 8 +- .../lung_nodule_detection/requirements.txt | 4 +- .../lung_nodule_detection/src/inference.py | 65 ++- .../lung_nodule_detection/src/prepare_data.py | 184 ++++--- .../src/train_network.py | 104 ++-- .../src/utils/__init__.py | 0 .../src/utils/create_folds.py | 504 +++++++++--------- .../src/utils/data_loader.py | 34 +- .../src/utils/generate_patches.py | 134 ++--- .../src/utils/generate_slices.py | 37 +- .../src/utils/infer_lung_seg.py | 271 +++++----- .../src/utils/infer_patch_classifier.py | 6 +- .../lung_nodule_detection/src/utils/lenet.py | 8 +- .../src/utils/lung_seg.py | 48 +- .../src/utils/lung_seg_adv.py | 70 ++- .../src/utils/max_unpool_2d.py | 129 +++++ .../src/utils/patch_classifier.py | 77 +-- .../lung_nodule_detection/src/utils/r2unet.py | 102 ++-- .../src/utils/sumnet_bn_vgg.py | 156 +++--- .../lung_nodule_detection/src/utils/utils.py | 5 +- .../src/utils/visualize.py | 74 ++- 21 files changed, 1021 insertions(+), 999 deletions(-) create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh b/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh index 2697b9bc2da..651940fc19d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh +++ 
b/misc/pytorch_toolkit/lung_nodule_detection/init_venv.sh @@ -1,13 +1,13 @@ #!/usr/bin/env bash -work_dir=$(realpath "$(dirname $0)") +work_dir= "$(realpath "$(dirname $0)")" venv_dir=$1 if [ -z "$venv_dir" ]; then venv_dir=venv fi -cd ${work_dir} +cd "${work_dir}" if [ -e venv ]; then echo @@ -15,9 +15,9 @@ if [ -e venv ]; then echo "$ . venv/bin/activate" fi -virtualenv ${venv_dir} -p python3 --prompt="nodule" +virtualenv "${venv_dir}" -p python3 --prompt="nodule" -. ${venv_dir}/bin/activate +. "${venv_dir}"/bin/activate cat requirements.txt | xargs -n 1 -L 1 pip3 install diff --git a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt index b8611b0f8e2..cabd5f67950 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt +++ b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt @@ -6,4 +6,6 @@ openvino-dev[onnx]==2021.4.2 onnxruntime==1.8.1 wget tqdm -pytest \ No newline at end of file +pytest +matplotlib +natsort diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py index fea91ba64f0..0940e2e5392 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py @@ -1,41 +1,40 @@ -from utils import infer_lung_seg -from utils import infer_patch_classifier +from .utils import infer_lung_seg +from .utils import infer_patch_classifier import argparse def main(args): - if args.lungseg: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - network = args.network - infer_lung_seg.infer_lungseg(foldno,savepath,network,jsonpath) - else: - savepath = args.savepath - imgpath = args.imgpath - infer_patch_classifier.lungpatch_classifier(savepath,imgpath) + if args.lungseg: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + network = args.network + infer_lung_seg.infer_lungseg(foldno,savepath,network,jsonpath) + else: + savepath = args.savepath + imgpath = args.imgpath + infer_patch_classifier.lungpatch_classifier(savepath,imgpath) if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Select action to be performed') - - parser.add_argument('--lungseg', default=False, action='store_true', - help='To test lung segmentation') - parser.add_argument('--patchclass', default=False, action='store_true', - help='test network to classify patch') - - parser.add_argument('--savepath', - help='Folder location to save the files') - parser.add_argument('--foldno', - help='Fold number') - parser.add_argument('--jsonpath', - help='Folder location where jsons are stored') - parser.add_argument('--imgpath', - help='Folder location where test images are stored') - parser.add_argument('--network', - help='Network to be trained') - - args= parser.parse_args() - - main(args) \ No newline at end of file + parser = argparse.ArgumentParser(description='Select action to be performed') + + parser.add_argument('--lungseg', default=False, action='store_true', + help='To test lung segmentation') + parser.add_argument('--patchclass', default=False, action='store_true', + help='test network to classify patch') + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--imgpath', + help='Folder location where test images are stored') + 
parser.add_argument('--network', + help='Network to be trained') + + args= parser.parse_args() + + main(args) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py index 471d2611b1e..92b1250db28 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py @@ -1,102 +1,100 @@ import argparse -from utils import visualize -from utils import generate_slices -from utils import create_folds -from utils import generate_patches +from .utils import visualize +from .utils import generate_slices +from .utils import create_folds +from .utils import generate_patches def main(args): - if args.genslice: - dataset_path = args.datasetpath - save_path = args.savepath - masktype = args.masktype - if masktype == 'nodule': - generate_slices.extract_slices(dataset_path,save_path,masktype) - else: - generate_slices.generate_lungseg(dataset_path,save_path) - - elif args.createfolds: - data_path = args.datapath - save_path = args.savepath - dataset_path = args.datasetpath - - create_folds.positive_negative_classifier(data_path,save_path) - dict_subset = create_folds.subset_classifier(dataset_path,save_path) - create_folds.assign_folds(dict_subset,save_path) - if args.additional: - create_folds.create_balanced_dataset(save_path,data_path,additional=True) - else: - create_folds.create_balanced_dataset(save_path,data_path) - - - elif args.genpatch: - jsonpath = args.jsonpath - foldno = args.foldno - category = args.category - data_path = args.datapath - lungsegpath = args.lungsegpath - savepath = args.savepath - patchtype = args.patchtype - if patchtype == 'positive': - generate_patches.generate_patchlist(jsonpath,patchtype,foldno) - generate_patches.generate_positive_patch(jsonpath,foldno,data_path,savepath,category) - else: - generate_patches.generate_patchlist(jsonpath,patchtype,foldno) - generate_patches.generate_negative_patch(jsonpath,foldno,data_path,lungsegpath,savepath,category) - - - elif args.visualize: - seriesuid = args.seriesuid - slice_num = args.sliceno - data_path = args.datapath - savepath = args.savepath - - visualize.visualize_data(seriesuid,slice_num,data_path,savepath) - - else: - print('Arguments not passed. 
Use -h for help') + if args.genslice: + dataset_path = args.datasetpath + save_path = args.savepath + masktype = args.masktype + if masktype == 'nodule': + generate_slices.extract_slices(dataset_path,save_path,masktype) + else: + generate_slices.generate_lungseg(dataset_path,save_path) + + elif args.createfolds: + data_path = args.datapath + save_path = args.savepath + dataset_path = args.datasetpath + + create_folds.positive_negative_classifier(data_path,save_path) + dict_subset = create_folds.subset_classifier(dataset_path,save_path) + create_folds.assign_folds(dict_subset,save_path) + if args.additional: + create_folds.create_balanced_dataset(save_path,data_path,additional=True) + else: + create_folds.create_balanced_dataset(save_path,data_path) + + + elif args.genpatch: + jsonpath = args.jsonpath + foldno = args.foldno + category = args.category + data_path = args.datapath + lungsegpath = args.lungsegpath + savepath = args.savepath + patchtype = args.patchtype + if patchtype == 'positive': + generate_patches.generate_patchlist(jsonpath,patchtype,foldno) + generate_patches.generate_positive_patch(jsonpath,foldno,data_path,savepath,category) + else: + generate_patches.generate_patchlist(jsonpath,patchtype,foldno) + generate_patches.generate_negative_patch(jsonpath,foldno,data_path,lungsegpath,savepath,category) + + + elif args.visualize: + seriesuid = args.seriesuid + slice_num = args.sliceno + data_path = args.datapath + savepath = args.savepath + + visualize.visualize_data(seriesuid,slice_num,data_path,savepath) + + else: + print('Arguments not passed. Use -h for help') if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Select action to be performed') - - parser.add_argument('--genslice', default=False, action='store_true', - help='To create slices from 3D volume') - parser.add_argument('--createfolds', default=False, action='store_true', - help='Split dataset into 10 folds') - parser.add_argument('--genpatch', default=False, action='store_true', - help='To create patches from 2D slices') - parser.add_argument('--visualize', default=False, action='store_true', - help='Visualize any one of the slices') - parser.add_argument('--savepath', - help='Folder location to save the files') - parser.add_argument('--masktype', - help='Type of mask to be generated. 
ie, nodule or lung') - parser.add_argument('--datasetpath', - help='Folder location of downloaded dataset') - parser.add_argument('--foldno', - help='Fold number') - parser.add_argument('--additional', default=False, action='store_true', - help='Add additional slices') - parser.add_argument('--category', - help='Category of data.[trainset,valset,testset]') - parser.add_argument('--jsonpath', - help='Folder location where jsons are stored') - parser.add_argument('--datapath', - help='Folder location containing img and mask folders') - parser.add_argument('--lungsegpath', - help='Folder containing lung segmentation mask') - parser.add_argument('--patchtype', - help='positive or negative') - parser.add_argument('--sliceno', - help='Slice number to visualize') - parser.add_argument('--seriesuid', - help='Seriesuid of slice to visualize') - - args=parser.parse_args() - - - - main(args) \ No newline at end of file + parser = argparse.ArgumentParser(description='Select action to be performed') + + parser.add_argument('--genslice', default=False, action='store_true', + help='To create slices from 3D volume') + parser.add_argument('--createfolds', default=False, action='store_true', + help='Split dataset into 10 folds') + parser.add_argument('--genpatch', default=False, action='store_true', + help='To create patches from 2D slices') + parser.add_argument('--visualize', default=False, action='store_true', + help='Visualize any one of the slices') + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--masktype', + help='Type of mask to be generated. ie, nodule or lung') + parser.add_argument('--datasetpath', + help='Folder location of downloaded dataset') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--additional', default=False, action='store_true', + help='Add additional slices') + parser.add_argument('--category', + help='Category of data.[trainset,valset,testset]') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--datapath', + help='Folder location containing img and mask folders') + parser.add_argument('--lungsegpath', + help='Folder containing lung segmentation mask') + parser.add_argument('--patchtype', + help='positive or negative') + parser.add_argument('--sliceno', + help='Slice number to visualize') + parser.add_argument('--seriesuid', + help='Seriesuid of slice to visualize') + + args=parser.parse_args() + + main(args) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py index 5942073125d..9e1d63507c9 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py @@ -5,67 +5,67 @@ def main(args): - if args.lungseg: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - datapath = args.datapath - lungsegpath = args.lungmask - network = args.network - if args.epochs: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) + if args.lungseg: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + datapath = args.datapath + lungsegpath = args.lungmask + network = args.network + if args.epochs: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) - else: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) + else: + 
lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) - elif args.lungsegadv: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - datapath = args.datapath - lungsegpath = args.lungmask - network = args.network - if args.epochs: - lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) - else: - lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network) - else: - savepath = args.savepath - imgpath = args.datapath - if args.epochs: - patch_classifier.lungpatch_classifier(savepath,imgpath,args.epochs) - else: - patch_classifier.lungpatch_classifier(savepath,imgpath) + elif args.lungsegadv: + foldno = args.foldno + savepath = args.savepath + jsonpath = args.jsonpath + datapath = args.datapath + lungsegpath = args.lungmask + network = args.network + if args.epochs: + lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) + else: + lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network) + else: + savepath = args.savepath + imgpath = args.datapath + if args.epochs: + patch_classifier.lungpatch_classifier(savepath,imgpath,args.epochs) + else: + patch_classifier.lungpatch_classifier(savepath,imgpath) if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Select action to be performed') + parser = argparse.ArgumentParser(description='Select action to be performed') - parser.add_argument('--lungseg', default=False, action='store_true', - help='To Train lung segmentation') - parser.add_argument('--lungsegadv', default=False, action='store_true', - help='To train lung seg network adversarially') - parser.add_argument('--patchclass', default=False, action='store_true', - help='Train network to classify patch') + parser.add_argument('--lungseg', default=False, action='store_true', + help='To Train lung segmentation') + parser.add_argument('--lungsegadv', default=False, action='store_true', + help='To train lung seg network adversarially') + parser.add_argument('--patchclass', default=False, action='store_true', + help='Train network to classify patch') - parser.add_argument('--savepath', - help='Folder location to save the files') - parser.add_argument('--foldno', - help='Fold number') - parser.add_argument('--jsonpath', - help='Folder location where jsons are stored') - parser.add_argument('--datapath', - help='Folder location where img and masks are stored') - parser.add_argument('--lungmask', - help='Folder location where lung masks are stored') + parser.add_argument('--savepath', + help='Folder location to save the files') + parser.add_argument('--foldno', + help='Fold number') + parser.add_argument('--jsonpath', + help='Folder location where jsons are stored') + parser.add_argument('--datapath', + help='Folder location where img and masks are stored') + parser.add_argument('--lungmask', + help='Folder location where lung masks are stored') - parser.add_argument('--network', - help='Network to be trained') - parser.add_argument('--epochs', - help='Number of epochs') + parser.add_argument('--network', + help='Network to be trained') + parser.add_argument('--epochs', + help='Number of epochs') - args= parser.parse_args() + args= parser.parse_args() - main(args) \ No newline at end of file + main(args) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d 
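Note: both CLI entry points above (`prepare_data.py` and `train_network.py`) only parse flags and forward them to the package functions. For reference, a minimal sketch of what the `--lungseg` path of `train_network.py` boils down to is shown below; all paths are placeholders and the import assumes the `src/utils` layout added in this patch.

```python
# Illustrative only: roughly the equivalent of
#   python train_network.py --lungseg --foldno 0 --savepath ... --jsonpath ... \
#       --datapath ... --lungmask ... --network sumnet --epochs 35
# All paths are placeholders; the import path assumes the src/utils layout from this patch.
from utils import lung_seg

lung_seg.train_network(
    fold_no=0,                        # fold index produced by create_folds
    save_path='/path/to/output/',
    json_path='/path/to/jsons/',      # folder holding fold<k>_pos_neg_eq.json
    datapath='/path/to/slices/',      # folder with the per-slice images and masks
    lung_segpath='/path/to/lungseg/',
    network='sumnet',                 # 'sumnet', 'unet'; anything else selects R2U_Net
    epochs=35,                        # note: argparse delivers --epochs as a string unless cast
)
```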
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py index b3a3d31345d..aebb0beb01d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py @@ -10,282 +10,282 @@ import argparse def positive_negative_classifier(data_path,save_path): - """Classifies slices as positive and negative slices. + """Classifies slices as positive and negative slices. - If any non-zero value is present in the mask/GT of - the specified slice then it is classified as positive else negative. + If any non-zero value is present in the mask/GT of + the specified slice then it is classified as positive else negative. - Parameters - ---------- - data_path: str - The folder location where masks/GT for slices are stored. - save_path: str - The folder location where output json files should be stored. + Parameters + ---------- + data_path: str + The folder location where masks/GT for slices are stored. + save_path: str + The folder location where output json files should be stored. - Returns - ------- - None + Returns + ------- + None - """ + """ - mask_path = data_path+'/mask/' + mask_path = data_path+'/mask/' - file_list=natsorted(os.listdir(mask_path)) - positive_list,negative_list=[],[] + file_list=natsorted(os.listdir(mask_path)) + positive_list,negative_list=[],[] - for file in tq(file_list): + for file in tq(file_list): - try: - mask = np.load(mask_path+file) - if (np.any(mask)): - positive_list.append(file) - else: - negative_list.append(file) - # break - except: - print('Skipped %s ,Unable to locate corresponding mask') - continue + try: + mask = np.load(mask_path+file) + if (np.any(mask)): + positive_list.append(file) + else: + negative_list.append(file) + # break + except: + print('Skipped %s ,Unable to locate corresponding mask') + continue - - with open(save_path+'positive_slices.json', 'w') as f: - json.dump(positive_list, f) - with open(save_path+'negative_slices.json', 'w') as g: - json.dump(negative_list, g) + + with open(save_path+'positive_slices.json', 'w') as f: + json.dump(positive_list, f) + with open(save_path+'negative_slices.json', 'w') as g: + json.dump(negative_list, g) def subset_classifier(dataset_path,save_path): - """ Classifies the slices according the subset of origin + """ Classifies the slices according the subset of origin + + Parameters + ---------- + dataset_path: str + Folder location where dataset is present - Parameters - ---------- - dataset_path: str - Folder location where dataset is present + Returns + ------- + dict + A dictionary consisting of filename according to their subset. + """ - Returns - ------- - dict - A dictionary consisting of filename according to their subset. - """ + dict_subset={} + dict_subset = defaultdict(lambda:[],dict_subset) + for i in range(10): + #Since 10 subsets are provided in the dataset. + for file in tq(os.listdir(dataset_path+'subset'+str(i))): + file_name=os.path.basename(file) + if file_name.endswith(".mhd"): + dict_subset['subset'+str(i)].append(file_name) - dict_subset={} - dict_subset = defaultdict(lambda:[],dict_subset) - for i in range(10): - #Since 10 subsets are provided in the dataset. 
- for file in tq(os.listdir(dataset_path+'subset'+str(i))): - file_name=os.path.basename(file) - if file_name.endswith(".mhd"): - dict_subset['subset'+str(i)].append(file_name) + with open(save_path+'subset_classification.json', 'w') as h: + json.dump(dict_subset, h) - with open(save_path+'subset_classification.json', 'w') as h: - json.dump(dict_subset, h) - - return dict_subset + return dict_subset def assign_folds(dict_subset,save_path): - """ Divides subsets into train,validation and testing sets of corresponding folds - - Parameters - ---------- - dict_subset: dict - Dictionary which has the files and its corresponding subset - savepath: str - Folder location to save the result - - Returns - ------- - None - """ - - dataset_list=[dict_subset['subset0'],dict_subset['subset1'], - dict_subset['subset2'],dict_subset['subset3'], - dict_subset['subset4'],dict_subset['subset5'], - dict_subset['subset6'],dict_subset['subset7'], - dict_subset['subset8'],dict_subset['subset9']] - - for i in tq(range(10)): #10 Subsets in the dataset - - fold={} - fold = defaultdict(lambda:0,fold) - fold['train_set']=dataset_list[0-i]+dataset_list[1-i]+dataset_list[2-i]+dataset_list[3-i]+dataset_list[4-i]+dataset_list[5-i]+dataset_list[6-i]+dataset_list[7-i] - - fold['valid_set']=dataset_list[8-i] - fold['test_set']=dataset_list[9-i] - - - fold_name='fold'+str(i)+'_mhd.json' - with open(save_path+fold_name, 'w') as j: - json.dump(fold, j) - + """ Divides subsets into train,validation and testing sets of corresponding folds + + Parameters + ---------- + dict_subset: dict + Dictionary which has the files and its corresponding subset + savepath: str + Folder location to save the result + + Returns + ------- + None + """ + + dataset_list=[dict_subset['subset0'],dict_subset['subset1'], + dict_subset['subset2'],dict_subset['subset3'], + dict_subset['subset4'],dict_subset['subset5'], + dict_subset['subset6'],dict_subset['subset7'], + dict_subset['subset8'],dict_subset['subset9']] + + for i in tq(range(10)): #10 Subsets in the dataset + + fold={} + fold = defaultdict(lambda:0,fold) + fold['train_set']=dataset_list[0-i]+dataset_list[1-i]+dataset_list[2-i]+dataset_list[3-i]+dataset_list[4-i]+dataset_list[5-i]+dataset_list[6-i]+dataset_list[7-i] + + fold['valid_set']=dataset_list[8-i] + fold['test_set']=dataset_list[9-i] + + + fold_name='fold'+str(i)+'_mhd.json' + with open(save_path+fold_name, 'w') as j: + json.dump(fold, j) + def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test): - """Adds additional negative slices to the prepared datalist - - Parameters - ---------- - series_uid_npylist: nparray - fold_npy: dict - series_uid_train: list - sereies_uid_val: list - sereies_uid_test: list - - Returns - ------- - dict - - """ - - for i in (series_uid_train): - c = series_uid_npylist.count(i) - count[i] = c - - if i in pos_list: - - for j in range(5): - file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['train_set'].append(file) - for j in range(count[i]-5,count[i]): - file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['train_set'].append(file) - - - for i in (series_uid_val): - c = series_uid_npylist.count(i) - count[i] = c - - if i in pos_list: - - for j in range(5): - file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['valid_set'].append(file) - for j in range(count[i]-5,count[i]): - file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['valid_set'].append(file) - - - for i in (series_uid_test): - c = series_uid_npylist.count(i) - count[i] = c - - if i in pos_list: - - for j in range(5): - 
file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['test_set'].append(file) - for j in range(count[i]-5,count[i]): - file = str(i)+'_slice'+str(j)+'.npy' - fold_npy['test_set'].append(file) - - return fold_npy + """Adds additional negative slices to the prepared datalist + + Parameters + ---------- + series_uid_npylist: nparray + fold_npy: dict + series_uid_train: list + sereies_uid_val: list + sereies_uid_test: list + + Returns + ------- + dict + + """ + + for i in (series_uid_train): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['train_set'].append(file) + for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['train_set'].append(file) + + + for i in (series_uid_val): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['valid_set'].append(file) + for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['valid_set'].append(file) + + + for i in (series_uid_test): + c = series_uid_npylist.count(i) + count[i] = c + + if i in pos_list: + + for j in range(5): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['test_set'].append(file) + for j in range(count[i]-5,count[i]): + file = str(i)+'_slice'+str(j)+'.npy' + fold_npy['test_set'].append(file) + + return fold_npy def create_balanced_dataset(save_path,data_path,additional=False): - """Creates balanced dataset with equal positive and negative slices - - Parameters - ---------- - data_path: str - The folder location where image/.npy for slices are stored. - save_path: str - The folder location where output json files should be stored. - additional: Boolean,Optional - If True add additonal negative slices - - Returns - ------- - dict - Returns dict with equal positive and negative slices. - """ - - img_path = data_path+'/img/' - - with open(save_path+'positive_slices.json') as c: - pos_slices_json=json.load(c) - - - pos_list=[x.split('.mhd')[0] for x in pos_slices_json] - pos_list_uq=np.unique(np.array(pos_list)) - - - print('Sorting entire image set. Will take time.') - sorted_list=natsorted(os.listdir(img_path)) - - print('Sorting completed') - - - for i in tq(range(10)): - with open(save_path+'fold'+str(i)+'_mhd.json') as f: - j_data=json.load(f) - - pos_count=0 - neg_count=0 - count = {} - fold_npy={} - fold_npy = defaultdict(lambda:[],fold_npy) - series_uid_train=[x.split('.mhd')[0] for x in j_data['train_set']] - series_uid_val=[x.split('.mhd')[0] for x in j_data['valid_set']] - series_uid_test=[x.split('.mhd')[0] for x in j_data['test_set']] - fold_npy_name='fold'+str(i)+'_pos_neg_eq.json' - # series_uid_npylist=[x.split('_')[0] for x in npy_list] - # series_uid_npylist_uq=np.unique(np.array(series_uid_npylist)) - - for f,name in enumerate(sorted_list): - - for q in series_uid_train: - if q in name : - if name in pos_slices_json: - #pos_slices_json contains the list of all positive slices. 
- pos_count += 1 - fold_npy['train_set'].append(name) - elif pos_count>neg_count: - # Here the slice will be negative since 'name' not in pos_slices - neg_count += 1 - fold_npy['train_set'].append(name) - else: - continue - else: - continue - - - for q in series_uid_val: - - if q in name : - if name in pos_slices_json: - - pos_count += 1 - fold_npy['valid_set'].append(name) - elif pos_count>neg_count: - - neg_count += 1 - fold_npy['valid_set'].append(name) - else: - continue - else: - continue - - for q in series_uid_test: - if q in name : - if name in pos_slices_json: - pos_count += 1 - fold_npy['test_set'].append(name) - elif pos_count>neg_count: - neg_count += 1 - fold_npy['test_set'].append(name) - else: - continue - else: - continue - with open(save_path+fold_npy_name, 'w') as z: - json.dump(fold_npy,z) - - if additional == True: - fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) - - - - print('Balanced dataset generated and saved') - - return fold_npy + """Creates balanced dataset with equal positive and negative slices + + Parameters + ---------- + data_path: str + The folder location where image/.npy for slices are stored. + save_path: str + The folder location where output json files should be stored. + additional: Boolean,Optional + If True add additonal negative slices + + Returns + ------- + dict + Returns dict with equal positive and negative slices. + """ + + img_path = data_path+'/img/' + + with open(save_path+'positive_slices.json') as c: + pos_slices_json=json.load(c) + + + pos_list=[x.split('.mhd')[0] for x in pos_slices_json] + pos_list_uq=np.unique(np.array(pos_list)) + + + print('Sorting entire image set. Will take time.') + sorted_list=natsorted(os.listdir(img_path)) + + print('Sorting completed') + + + for i in tq(range(10)): + with open(save_path+'fold'+str(i)+'_mhd.json') as f: + j_data=json.load(f) + + pos_count=0 + neg_count=0 + count = {} + fold_npy={} + fold_npy = defaultdict(lambda:[],fold_npy) + series_uid_train=[x.split('.mhd')[0] for x in j_data['train_set']] + series_uid_val=[x.split('.mhd')[0] for x in j_data['valid_set']] + series_uid_test=[x.split('.mhd')[0] for x in j_data['test_set']] + fold_npy_name='fold'+str(i)+'_pos_neg_eq.json' + # series_uid_npylist=[x.split('_')[0] for x in npy_list] + # series_uid_npylist_uq=np.unique(np.array(series_uid_npylist)) + + for f,name in enumerate(sorted_list): + + for q in series_uid_train: + if q in name : + if name in pos_slices_json: + #pos_slices_json contains the list of all positive slices. 
+ pos_count += 1 + fold_npy['train_set'].append(name) + elif pos_count>neg_count: + # Here the slice will be negative since 'name' not in pos_slices + neg_count += 1 + fold_npy['train_set'].append(name) + else: + continue + else: + continue + + + for q in series_uid_val: + + if q in name : + if name in pos_slices_json: + + pos_count += 1 + fold_npy['valid_set'].append(name) + elif pos_count>neg_count: + + neg_count += 1 + fold_npy['valid_set'].append(name) + else: + continue + else: + continue + + for q in series_uid_test: + if q in name : + if name in pos_slices_json: + pos_count += 1 + fold_npy['test_set'].append(name) + elif pos_count>neg_count: + neg_count += 1 + fold_npy['test_set'].append(name) + else: + continue + else: + continue + with open(save_path+fold_npy_name, 'w') as z: + json.dump(fold_npy,z) + + if additional == True: + fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) + + + + print('Balanced dataset generated and saved') + + return fold_npy diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index 73695431ebd..3b443b2248c 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -1,15 +1,9 @@ -#!/usr/bin/env python -# coding: utf-8 - import torch from torch.utils import data import os from torchvision import transforms from PIL import Image import numpy as np -from glob import glob -from natsort import natsorted - class LungDataLoader(data.Dataset): @@ -32,7 +26,7 @@ class LungDataLoader(data.Dataset): """ def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= True,img_size= 512): - + self.split=split self.path= datapath self.lung_path=lung_path @@ -44,19 +38,19 @@ def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= T [transforms.Resize(self.img_size), transforms.ToTensor() ]) - + self.lung_tf = transforms.Compose( [transforms.Resize(self.img_size), transforms.ToTensor() ]) def __len__(self): - return len(self.files) - + return len(self.files) + def __getitem__(self,index): filename = self.files[index] - img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) + img = Image.fromarray(np.load(self.path+'image/'+filename).astype(float)) lung_mask = Image.fromarray(np.load(self.lung_path+filename).astype(float)) if self.is_transform: @@ -77,19 +71,15 @@ def transform(self,img,lung_mask): class LungPatchDataLoader(data.Dataset): def __init__(self,imgpath,split="train_set",is_transform= True): - + self.split = split - self.imgpath = imgpath+self.split+'/img/' self.is_transform = is_transform self.files = os.listdir(self.imgpath) - def __len__(self): return len(self.files) - - - + def __getitem__(self,index): filename = self.files[index] @@ -97,24 +87,18 @@ def __getitem__(self,index): if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 l2 = 0 else: - l2 = 1 + l2 = 1 label = torch.tensor([l1,l2]) img = np.load(self.imgpath+filename) - size_dataset = len(os.listdir(self.imgpath)) - - if self.is_transform: img= self.transform(img) return img,label - - - + def transform(self,img): img = torch.Tensor(img).unsqueeze(0) img = img.type(torch.FloatTensor) return img - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py index 
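Note: the fold jsons written by `create_folds.create_balanced_dataset` feed directly into `LungDataLoader` above. A minimal sketch of wiring the two together follows; the paths are placeholders and the import assumes the `src/utils` layout from this patch.

```python
# Minimal sketch (placeholder paths): build a training loader from a fold json
# produced by create_folds.create_balanced_dataset.
import json
from torch.utils import data
from utils.data_loader import LungDataLoader

with open('/path/to/jsons/fold0_pos_neg_eq.json') as f:
    fold_json = json.load(f)

train_dset = LungDataLoader(
    datapath='/path/to/slices/',    # reads from an image/ sub-folder after this change
    lung_path='/path/to/lungseg/',  # per-slice lung masks from generate_lungseg
    json_file=fold_json,
    split='train_set',
    is_transform=True,
    img_size=512,
)
train_loader = data.DataLoader(train_dset, batch_size=8, shuffle=True,
                               num_workers=4, pin_memory=True, drop_last=True)
imgs, lung_masks = next(iter(train_loader))  # one batch of slices and lung masks
```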
57d99156da6..44b20c20afd 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py @@ -7,7 +7,7 @@ import cv2 from skimage.util.shape import view_as_windows import json -from tqdm import tqdm as tq +from tqdm import tqdm as tq import matplotlib.pyplot as plt import random from collections import defaultdict @@ -47,17 +47,17 @@ def generate_patchlist(save_path,patchtype,fold_no=0): for i in tq(train_set): if i in pos_slices_json: train_seg_list.append(i) - + for i in tq(valid_set): if i in pos_slices_json: val_seg_list.append(i) - + for i in tq(test_set): if i in pos_slices_json: test_seg_list.append(i) - + patch_npy={} - patch_npy = defaultdict(lambda:[],patch_npy) + patch_npy = defaultdict(lambda:[],patch_npy) patch_npy['train_set'] = train_seg_list patch_npy['valid_set'] = val_seg_list patch_npy['test_set'] = test_seg_list @@ -75,7 +75,7 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego jsonpath: str Folder location where json files are stored fold: int - Fold number + Fold number category: str train_set/val_set/test_set data_path: str @@ -97,13 +97,13 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego img_dir = imgpath mask_dir = lung_segpath - nm_list = j_data[category] + nm_list = j_data[category] size = 64 index = 0 for img_name in tq(nm_list): #Loading the masks as uint8 as threshold function accepts 8bit image as parameter. - img = np.load(os.path.join(img_dir, img_name)).astype(np.float32)#*255 + img = np.load(os.path.join(img_dir, img_name)).astype(np.float32)#*255 mask = np.load(os.path.join(mask_dir, img_name)).astype(np.uint8)#*255 if np.any(mask): @@ -111,28 +111,28 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value contours, hierarchy = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=lambda x: cv2.contourArea(x)) - + #In certain cases there could be more than 2 contour, hence taking the largest 2 which will be lung contours = contours[1:] - - + + for cntr in contours: patch_count = 2 - for i in range(patch_count): + for _ in range(patch_count): xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width - xc,yc = xr+wr/2,yr+hr/2 - + # xc,yc = xr+wr/2,yr+hr/2 + try: x, y = random.randrange(xr, xr+wr-size/2),random.randrange(yr, yr+hr-size/2) - + except: prob = random.randrange(0, 1) if prob>0.5: x, y = random.randrange(xr, xr+wr/2),random.randrange(yr, yr+hr/2) else: x, y = random.randrange(int(xr+wr/2),xr+wr),random.randrange(int(yr+hr/2),yr+hr) - + if x+size<512 & y+size<512: patch_img = img[y: y+size, x: x+size].copy().astype(np.float16) patch_mask = np.zeros((size,size)).astype(np.float16) @@ -148,10 +148,10 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego elif x-size>0 & y-size<=0: patch_img = img[0: size, x-size: x].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) - + patch_mask = np.zeros((size,size)).astype(np.float16) + else: - + patch_img = img[y-size: y, x-size: x].copy().astype(np.float16) patch_mask = np.zeros((size,size)).astype(np.float16) @@ -161,7 +161,7 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego print('shape',np.shape(patch_img)) print('cordinate of 
patch',x,x+size,y,y+size) print('cordinate of BBox',xr,yr,wr,hr) - + index += 1 img_savepath = savepath+'/patches/'+'/img/' mask_savepath = savepath+'/patches/'+'/mask/' @@ -186,7 +186,7 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set jsonpath: str Folder location where json files are stored fold: int - Fold number + Fold number category: str train_set/val_set/test_set data_path: str @@ -197,7 +197,7 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set Returns ------- - None + None """ imgpath = data_path + '/img/' @@ -207,8 +207,8 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set j_data = json.load(file) img_dir = imgpath - mask_dir = maskpath - nm_list = j_data[category] + mask_dir = maskpath + nm_list = j_data[category] size = 64 index = 0 @@ -221,12 +221,12 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set if np.any(mask): #Convert grayscale image to binary _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value - contours, hierarchy = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours, _ = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=lambda x: cv2.contourArea(x)) - + for cntr in contours: patch_count = 4 - + xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width xc,yc = int(xr+wr/2),int(yr+hr/2) @@ -238,24 +238,24 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set elif int(yc-size/2) >0 and int(xc-size/2)<0: patch_img1 = img[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) patch_mask1 = mask[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) - + elif int(yc-size/2) <0 and int(xc-size/2)>0: patch_img1 = img[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - patch_mask1 = mask[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - - + patch_mask1 = mask[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + + elif int(yc+size/2)>512 or int(xc+size/2)>512: if int(yc+size/2)>512 and int(xc+size/2)>512: m = yc+size - 512 n = xc + size - 512 patch_img1 = img[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) - patch_mask1 = mask[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) - + patch_mask1 = mask[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) + elif int(yc+size/2)>512 and int(xc+size/2)<512: m = yc+size - 512 patch_img1 = img[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - patch_mask1 = mask[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - + patch_mask1 = mask[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + elif int(yc+size/2)<512 and int(xc+size/2)>512: n = xc+size - 512 patch_img1 = img[int(yc-size/2):int(yc+size/2),int(xc-n):512].copy().astype(np.float16) @@ -263,13 +263,17 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set elif (int(yc-size/2)>=0 and int(yc+size/2)<=512) : if(int(xc-size/2)>=0 and int(xc+size/2)<=512): - patch_img1 = img[int(yc-size/2):int(yc+size/2) , int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - patch_mask1 = mask[int(yc-size/2):int(yc+size/2) , int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_img1 = img[ + int(yc-size/2):int(yc+size/2), + int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[ + 
int(yc-size/2):int(yc+size/2), + int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) if np.shape(patch_img1) != (64,64): print('shape',np.shape(patch_img1)) print('cordinate of patch',x,x+size,y,y+size) - print('cordinate of BBox',xr,yr,wr,hr) + print('cordinate of BBox',xr,yr,wr,hr) img_savepath = savepath+'/patches/'+category+'/img/' mask_savepath = savepath+'/patches/'+category+'/mask/' @@ -289,9 +293,9 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set for i in range(patch_count): xc,yc = xr,yr xc,yc = xr+wr,yr+hr - + if i == 0: - + if xc+size<512 and yc+size<512: patch_img = img[yc:yc+size,xc:xc+size].copy().astype(np.float16) patch_mask = mask[yc:yc+size,xc:xc+size].copy().astype(np.float16) @@ -304,77 +308,77 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set elif xc+size<512 and yc+size>512: n = yc+size-512 patch_img = img[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) else: - m = xc+size-512 + m = xc+size-512 n = yc+size-512 patch_img = img[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) patch_mask = mask[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) elif i ==1: - + if xc-size>0 and yc+size<512: patch_img = img[yc:yc+size,xc-size:xc].copy().astype(np.float16) patch_mask = mask[yc:yc+size,xc-size:xc].copy().astype(np.float16) - + elif xc-size<0 and yc+size<512: - + patch_img = img[yc:yc+size,0:size].copy().astype(np.float16) - patch_mask = mask[yc:yc+size,0:size].copy().astype(np.float16) - + patch_mask = mask[yc:yc+size,0:size].copy().astype(np.float16) + elif xc-size>0 and yc+size>512: n = yc+size-512 patch_img = img[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) - + patch_mask = mask[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) + else: n = yc+size-512 patch_img = img[yc-n:yc+size-n,0:size].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,0:size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n,0:size].copy().astype(np.float16) elif i ==2: - + if xc+size<512 and yc-size>0: patch_img = img[yc-size:yc,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc:xc+size].copy().astype(np.float16) elif xc+size>512 and yc-size>0: m = xc+size-512 patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) - + elif xc+size<512 and yc-size<0: patch_img = img[0:size,xc:xc+size].copy().astype(np.float16) patch_mask = mask[0:size,xc:xc+size].copy().astype(np.float16) - + else: m = xc+size-512 patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) - + patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + elif i==3: - + if xc-size>0 and yc-size>0: patch_img = img[yc-size:yc,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc-size:xc].copy().astype(np.float16) + patch_mask = mask[yc-size:yc,xc-size:xc].copy().astype(np.float16) elif xc-size<0 and yc-size>0: m = xc+size-512 patch_img = img[yc-size:yc,0:size].copy().astype(np.float16) patch_mask = mask[yc-size:yc,0:size].copy().astype(np.float16) - + elif xc-size>0 and yc-size<0: patch_img = 
img[0:size,xc-size:xc].copy().astype(np.float16) patch_mask = mask[0:size,xc-size:xc].copy().astype(np.float16) - + else: patch_img = img[0:size,0:size].copy().astype(np.float16) - patch_mask = mask[0:size,0:size].copy().astype(np.float16) - - + patch_mask = mask[0:size,0:size].copy().astype(np.float16) + + if np.shape(patch_img) != (64,64): print('shape',np.shape(patch_img)) - + img_savepath = savepath+'/patches/'+category+'/img/' mask_savepath = savepath+'/patches/'+category+'/mask/' if not os.path.isdir(img_savepath): @@ -390,5 +394,3 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) index += 1 - - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py index 17a118a125c..703523c8365 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py @@ -1,6 +1,5 @@ import SimpleITK as sitk import pylidc as pl -import matplotlib.pyplot as plt import numpy as np from collections import defaultdict import os @@ -33,7 +32,7 @@ def make_mask(height,width,slice_list,*args, **kwargs): point_dictx=kwargs.get('ii', None) point_dicty=kwargs.get('jj', None) - + if n in slice_list: temp_listx=point_dictx[n] temp_listy=point_dicty[n] @@ -46,7 +45,7 @@ def make_mask(height,width,slice_list,*args, **kwargs): return mask def extract_slices(dataset_path,save_path,masktype='nodule'): - """Extracts induvidual slices from the CT volumes given + """Extracts induvidual slices from the CT volumes given in the dataset, clips the max-min values and stores them as numpy arrays. @@ -79,14 +78,14 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): file_name=os.path.basename(file) series_instance_uid=os.path.splitext(file_name)[0] img_file=file - - itk_img = sitk.ReadImage(img_file) + + itk_img = sitk.ReadImage(img_file) img_array = sitk.GetArrayFromImage(itk_img) num_slice, height, width = img_array.shape #Has the image data scan = pl.query(pl.Scan).filter(pl.Scan.series_instance_uid== series_instance_uid).first() - + #Maped the image data with annotation using series id nods = scan.cluster_annotations() #Function used to determine which annotation belongs to which nodule @@ -99,13 +98,13 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): points_dicty={} points_dictx = defaultdict(lambda:[],points_dictx) points_dicty = defaultdict(lambda:[],points_dicty) - for i,nod in enumerate(nods): - nodule_dict[i]=len(nods[i]) #Stores a dict which has count of annotation for each nodule + for nod in nods: + nodule_dict[i]=len(nod) #Stores a dict which has count of annotation for each nodule for key,value in nodule_dict.items(): #if value>=3 : #Taking annotations provided by 3 or more annotator for i in range(value): - ann=nods[key][i] #-1 to keep index correct + ann=nods[key][i] #-1 to keep index correct con=ann.contours[0] #All coutours for specific nodule collected k = con.image_k_position # Returns the slice number/index which has the nodule @@ -114,22 +113,16 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): points_dictx[k].append(ii) points_dicty[k].append(jj) - - ''' - !!Note!! The pylidc package gives cordinates for single slices, If more than one annotaions are give then - Sum(x)/total no: of annotation for all provided pixel is given as input - - ''' - + # !!Note!! 
The pylidc package gives cordinates for single slices, If more than one annotaions are give then + # Sum(x)/total no: of annotation for all provided pixel is given as input for n in range(1,num_slice): - image=(img_array[n].copy()).astype(np.float32) + image=(img_array[n].copy()).astype(np.float32) im_max = np.max(image) - im_min = np.min(image) if im_max != 0: image[image>1000]=1000 - image[image<-1000]=-1000 + image[image<-1000]=-1000 mask=make_mask(height,width,slice_list,ii=points_dictx,jj=points_dicty,n=n) mask = np.array(mask, dtype=np.float32) image = image - image.min() @@ -151,7 +144,6 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): np.save(save_path+'/mask/'+series_instance_uid+'_slice'+str(n)+'.npy',mask) def generate_lungseg(dataset_path,save_path): - volume_list = os.listdir(dataset_path) file_list = [] for file in os.listdir(dataset_path): @@ -161,13 +153,12 @@ def generate_lungseg(dataset_path,save_path): for img_file in tq(file_list): file_name=os.path.basename(img_file) series_instance_uid=os.path.splitext(file_name)[0] - itk_img = sitk.ReadImage(img_file) + itk_img = sitk.ReadImage(img_file) img_array = sitk.GetArrayFromImage(itk_img) - num_slice, height, width = img_array.shape + num_slice, _, _ = img_array.shape for n in range(1,num_slice): if not os.path.isdir(save_path+'/lungseg'): os.makedirs(save_path+'/lungseg') np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) else: np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py index 3a10ad79bba..f31fa7a284d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py @@ -1,192 +1,173 @@ -#!/usr/bin/env python -# coding: utf-8 - import numpy as np import torch -import torch.nn as nn -from torch import optim from tqdm import tqdm as tq -import time from torch.utils import data import os import torch.nn.functional as F from torch.autograd import Variable import matplotlib.pyplot as plt -plt.switch_backend('agg') -from .sumnet_bn_vgg import SUMNet -from .r2unet import R2U_Net -from .r2unet import U_Net -from torchvision import transforms import json -from PIL import Image -from .dataloader import LungDataLoader +from .sumnet_bn_vgg import SUMNet +from .r2unet import R2U_Net, U_Net +from .data_loader import LungDataLoader from .utils import dice_coefficient +plt.switch_backend('agg') +def infer_lungseg(fold_no,save_path,network,jsonpath): + """ Inference script for lung segmentation -def infer_lungseg(fold_no,savepath,network,jsonpath): - """ Inference script for lung segmentation + Parameters + ---------- + fold_no: int + Fold number to which action is to be performed + save_path: str + Folder location to save the results + network: str + Network name + jsonpath: + Folder location where file is to be stored - Parameters - ---------- - fold_no: int - Fold number to which action is to be performed - savepath: str - Folder location to save the results - network: str - Network name - jsonpath: - Folder location where file is to be stored + Returns + ------- + None - Returns - ------- - None + """ - """ + fold = 'fold'+str(fold_no) - fold = 'fold'+str(fold_no) + save_path = os.path.join(save_path,network,fold) - savePath = savepath+network+'/'+fold+'/' - if not os.path.isdir(savePath): - 
os.makedirs(savePath) + if not os.path.isdir(save_path): + os.makedirs(save_path) - with open(jsonpath+fold+'_pos_neg_eq.json') as f: - json_file = json.load(f) - test_set = json_file['test_set'] + with open(os.path.join(jsonpath,fold+'_pos_neg_eq.json')) as f: + json_file = json.load(f) - testDset = LungDataLoader(is_transform=True,json_file=json_file,split="test_set",img_size=512) - testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) + testDset = LungDataLoader(is_transform=True,json_file=json_file,split="test_set",img_size=512) + testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) - testBatches = 0 - testDice_lungs = 0 + testBatches = 0 + testDice_lungs = 0 - if network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) - elif network == 'unet': - net = U_Net(img_ch=1,output_ch=2) - else: - net = R2U_Net(img_ch=1,output_ch=2) + if network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + elif network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + else: + net = R2U_Net(img_ch=1,output_ch=2) - dice_list = [] - use_gpu = torch.cuda.is_available() + dice_list = [] + use_gpu = torch.cuda.is_available() - if use_gpu: - net = net.cuda() + if use_gpu: + net = net.cuda() - net.load_state_dict(torch.load(savePath+network+'_best_lungs.pt')) + net.load_state_dict(torch.load(save_path+network+'_best_lungs.pt')) - for data1 in tq(testDataLoader): + for data1 in tq(testDataLoader): - imgs, mask = data1 - labels = mask - if use_gpu: - inputs = imgs.cuda() - labels = labels.cuda() + imgs, mask = data1 + labels = mask + if use_gpu: + inputs = imgs.cuda() + labels = labels.cuda() - net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out.data,dim=1) + net_out = net(Variable(inputs)) + net_out_sf = F.softmax(net_out.data,dim=1) - test_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - - pred_max = torch.argmax(net_out_sf, dim=1) - preds = torch.zeros(pred_max.shape) - preds[pred_max == 1] = 1 + test_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + pred_max = torch.argmax(net_out_sf, dim=1) + preds = torch.zeros(pred_max.shape) + preds[pred_max == 1] = 1 - if not os.path.isdir(savePath+'seg_results/GT/'): - os.makedirs(savePath+'seg_results/GT/') - np.save(savePath+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + if not os.path.isdir(save_path+'seg_results/GT/'): + os.makedirs(save_path+'seg_results/GT/') + np.save(save_path+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) else: - np.save(savePath+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + np.save(save_path+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) - if not os.path.isdir(savePath+'seg_results/pred/'): - os.makedirs(savePath+'seg_results/pred/') - np.save(savePath+'seg_results/pred/image'+str(testBatches),preds.cpu()) + if not os.path.isdir(save_path+'seg_results/pred/'): + os.makedirs(save_path+'seg_results/pred/') + np.save(save_path+'seg_results/pred/image'+str(testBatches),preds.cpu()) else: - np.save(savePath+'seg_results/pred/image'+str(testBatches),preds.cpu()) + np.save(save_path+'seg_results/pred/image'+str(testBatches),preds.cpu()) - if not os.path.isdir(savePath+'seg_results/image/'): - os.makedirs(savePath+'seg_results/image/') - np.save(savePath+'seg_results/image/image'+str(testBatches),inputs.cpu()) + if not os.path.isdir(save_path+'seg_results/image/'): + os.makedirs(save_path+'seg_results/image/') + 
np.save(save_path+'seg_results/image/image'+str(testBatches),inputs.cpu()) else: - np.save(savePath+'seg_results/image/image'+str(testBatches),inputs.cpu()) + np.save(save_path+'seg_results/image/image'+str(testBatches),inputs.cpu()) + + testDice_lungs += test_dice[0] + dice_list.append(test_dice[0].cpu()) + testBatches += 1 + # if testBatches>1: + # break - testDice_lungs += test_dice[0] - dice_list.append(test_dice[0].cpu()) - testBatches += 1 - # if testBatches>1: - # break - - dice = np.mean(dice_list) - print("Result:",fold,dice) + dice = np.mean(dice_list) + print("Result:",fold,dice) - #Plots distribution of min values per volume - plt.figure() - plt.title('Distribution of Dice values') - plt.hist(dice_list) - plt.xlabel('Dice score') - plt.ylabel('No. of Slices') - plt.savefig(savePath+'dice_dist.jpg') - # plt.show() - plt.close() + #Plots distribution of min values per volume + plt.figure() + plt.title('Distribution of Dice values') + plt.hist(dice_list) + plt.xlabel('Dice score') + plt.ylabel('No. of Slices') + plt.savefig(save_path+'dice_dist.jpg') + # plt.show() + plt.close() def visualise_seg(loadpath): - """ - To visualise the segmentation performance(Qualitative results) - - Parameters - ---------- - - loadpath: str - Folder location from where the files are to be loaded - - Returns - ------- - None - - """ - - image_list = os.listdir(loadpath+'GT/') - count = 0 - for i in tq(image_list): - img = np.load(loadpath+'image/'+i) - GT = np.load(loadpath+'GT/'+i) - pred = np.load(loadpath+'pred/'+i) - - plt.figure(figsize = [15,5]) - plt.subplot(141) - plt.axis('off') - plt.title('Input Image') - plt.imshow(img[0][0],cmap = 'gray') - plt.subplot(142) - plt.axis('off') - plt.title('GT') - plt.imshow(GT[0],cmap = 'gray') - plt.subplot(143) - plt.axis('off') - plt.title('Pred') - plt.imshow(pred[0],cmap = 'gray') - plt.subplot(144) - plt.title('GT - Pred') - plt.axis('off') - test = GT[0]-pred[0] - test[test>0] = 1 - test[test<=0] = 0 - plt.imshow(test,cmap = 'gray') - count += 1 + """ + To visualise the segmentation performance(Qualitative results) + + Parameters + ---------- + + loadpath: str + Folder location from where the files are to be loaded + + Returns + ------- + None + + """ + + image_list = os.listdir(loadpath+'GT/') + count = 0 + for i in tq(image_list): + img = np.load(loadpath+'image/'+i) + GT = np.load(loadpath+'GT/'+i) + pred = np.load(loadpath+'pred/'+i) + + plt.figure(figsize = [15,5]) + plt.subplot(141) + plt.axis('off') + plt.title('Input Image') + plt.imshow(img[0][0],cmap = 'gray') + plt.subplot(142) + plt.axis('off') + plt.title('GT') + plt.imshow(GT[0],cmap = 'gray') + plt.subplot(143) + plt.axis('off') + plt.title('Pred') + plt.imshow(pred[0],cmap = 'gray') + plt.subplot(144) + plt.title('GT - Pred') + plt.axis('off') + test = GT[0]-pred[0] + test[test>0] = 1 + test[test<=0] = 0 + plt.imshow(test,cmap = 'gray') + count += 1 if not os.path.isdir(loadpath+'seg_results/op_images/'): os.makedirs(loadpath+'seg_results/op_images/') - plt.savefig(loadpath+'seg_results/op_images/img'+str(count)+'.jpg') + plt.savefig(loadpath+'seg_results/op_images/img'+str(count)+'.jpg') else: plt.savefig(loadpath+'seg_results/op_images/img'+str(count)+'.jpg') - - # if count>10: - # break - - - - - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py index 573fcb8d292..f2263e1ef4b 100644 --- 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py @@ -22,10 +22,10 @@ from PIL import Image from tqdm import tqdm_notebook as tq from sklearn.metrics import confusion_matrix -from dataloader import LungPatchDataLoader -from lenet import LeNet +from .data_loader import LungPatchDataLoader +from .lenet import LeNet -def infer_classifier(modelpath,imgpath): +def lungpatch_classifier(modelpath,imgpath): testDset = LungPatchDataLoader(imgpath,is_transform=True,split="test") testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py index ae265121d9a..5720332202d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py @@ -1,14 +1,10 @@ - - -import numpy as np import torch -import torch.nn as nn +from torch import nn import torch.nn.functional as F - class LeNet(nn.Module): def __init__(self): - super(LeNet, self).__init__() + super().__init__() self.conv1 = nn.Conv2d(1, 6, kernel_size=5) self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) self.conv2 = nn.Conv2d(6, 16, kernel_size=5) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py index 090f8bec646..e031234c413 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# coding: utf-8 - import numpy as np import torch import torch.nn as nn @@ -12,15 +9,13 @@ import torch.nn.functional as F from torch.autograd import Variable import matplotlib.pyplot as plt -plt.switch_backend('agg') from .sumnet_bn_vgg import SUMNet from .r2unet import R2U_Net from .r2unet import U_Net -from torchvision import transforms import json -from PIL import Image from .data_loader import LungDataLoader from .utils import dice_coefficient +plt.switch_backend('agg') def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4): """Training function for SUMNet,UNet,R2Unet @@ -53,7 +48,7 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc fold = 'fold'+str(fold_no) savePath = save_path+'/'+network+'/'+fold+'/' if not os.path.isdir(savePath): - os.makedirs(savePath) + os.makedirs(savePath) with open(json_path+fold+'_pos_neg_eq.json') as f: json_file = json.load(f) @@ -76,9 +71,9 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc use_gpu = torch.cuda.is_available() if use_gpu: - net = net.cuda() + net = net.cuda() + - optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) criterion = nn.BCEWithLogitsLoss() @@ -120,26 +115,26 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss + net_loss = BCE_Loss optimizer.zero_grad() - net_loss.backward() - + net_loss.backward() + optimizer.step() - + trainRunningLoss += net_loss.item() - + trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - trainDice_lungs += trainDice[0] - - trainBatches += 1 + trainDice_lungs += trainDice[0] + + trainBatches += 1 # if trainBatches>1: # break 
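Note: the segmentation training above optimizes binary cross-entropy on the lung channel of the two-channel output, while the Dice coefficient is only tracked for monitoring and model selection. A self-contained sketch of one optimization step follows; the tiny stand-in network, dummy tensors, and the `dice_hard` helper are illustrative assumptions, not the repository's `utils.dice_coefficient`.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Stand-ins so the sketch runs on its own; in the repo, `net` is SUMNet/U_Net/R2U_Net
# and (imgs, labels) come from LungDataLoader.
net = nn.Conv2d(1, 2, kernel_size=3, padding=1)
imgs = torch.randn(2, 1, 64, 64)
labels = torch.zeros(2, 2, 64, 64)
labels[:, 1, 20:40, 20:40] = 1
labels[:, 0] = 1 - labels[:, 1]
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-5)
criterion = nn.BCEWithLogitsLoss()

def dice_hard(pred_softmax, target_idx, eps=1e-7):
    # Hard Dice on the argmax prediction -- a stand-in for utils.dice_coefficient,
    # whose implementation is outside this hunk.
    pred_idx = torch.argmax(pred_softmax, dim=1)
    inter = ((pred_idx == 1) & (target_idx == 1)).sum().float()
    denom = (pred_idx == 1).sum().float() + (target_idx == 1).sum().float()
    return (2 * inter + eps) / (denom + eps)

net_out = net(imgs)                                 # (B, 2, H, W) logits: background / lung
net_out_sf = F.softmax(net_out, dim=1)
bce_loss = criterion(net_out[:, 1], labels[:, 1])   # BCE on the lung channel only
optimizer.zero_grad()
bce_loss.backward()
optimizer.step()
train_dice = dice_hard(net_out_sf, torch.argmax(labels, dim=1))
```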
trainLoss.append(trainRunningLoss/trainBatches) trainDiceCoeff_lungs.append(trainDice_lungs/trainBatches) - + with torch.no_grad(): for data1 in tq(validDataLoader): @@ -153,17 +148,17 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc net_out_sf = F.softmax(net_out.data,dim=1) - BCE_Loss = criterion(net_out[:,1],labels[:,1]) + BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss + net_loss = BCE_Loss val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - validDice_lungs += val_dice[0] + validDice_lungs += val_dice[0] validRunningLoss += net_loss.item() - validBatches += 1 + validBatches += 1 # if validBatches>1: - # break + # break validLoss.append(validRunningLoss/validBatches) validDiceCoeff_lungs.append(validDice_lungs/validBatches) @@ -173,7 +168,7 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc if (validDice_lungs.cpu() > bestValidDice_lungs): bestValidDice_lungs = validDice_lungs.cpu() torch.save(net.state_dict(), savePath+'sumnet_best_lungs.pt') - + plot=plt.figure() plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') @@ -184,7 +179,7 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc plt.savefig(savePath+'LossPlot.png') plt.close() epochEnd = time.time()-epochStart - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) @@ -247,6 +242,3 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc plt.ylabel('Dice coefficient') plt.savefig(savePath+'Dice_final.png') plt.close() - - - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py index b098335b48a..7b03512becb 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# coding: utf-8 - import numpy as np import torch import torch.nn as nn @@ -12,15 +9,13 @@ import torch.nn.functional as F from torch.autograd import Variable import matplotlib.pyplot as plt -plt.switch_backend('agg') from .sumnet_bn_vgg import SUMNet from .r2unet import U_Net from .r2unet import R2U_Net -from torchvision import transforms import json -from PIL import Image from .utils import dice_coefficient from .data_loader import LungDataLoader +plt.switch_backend('agg') class Discriminator(nn.Module): def __init__(self,in_ch, out_ch): @@ -29,12 +24,12 @@ def __init__(self,in_ch, out_ch): # input is (nc) x 64 x 64 nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), + nn.MaxPool2d(3), # state size. 
(64) x 32 x 32 nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), nn.BatchNorm2d(64 * 2), nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), + nn.MaxPool2d(3), nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), nn.BatchNorm2d(64 * 2), nn.LeakyReLU(0.2, inplace=True), @@ -42,11 +37,11 @@ def __init__(self,in_ch, out_ch): nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), nn.BatchNorm2d(64 * 4), nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), + nn.MaxPool2d(3), nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), nn.BatchNorm2d(64 * 4), nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(2), + nn.MaxPool2d(2), # state size. (64*4) x 8 x 8 nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), nn.LeakyReLU(0.2, inplace=True), @@ -58,7 +53,7 @@ def forward(self, input): def ch_shuffle(x): shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) - shuffIdx2 = 1-shuffIdx1 + shuffIdx2 = 1-shuffIdx1 d_in = torch.Tensor(x.size()).cuda() d_in[:,shuffIdx1] = x[:,0] d_in[:,shuffIdx2] = x[:,1] @@ -97,7 +92,7 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo fold = 'fold'+str(fold_no) savePath = savepath+'/'+network+'/'+fold+'/' if not os.path.isdir(savePath): - os.makedirs(savePath) + os.makedirs(savePath) with open(jsonpath+fold+'_pos_neg_eq.json') as f: json_file = json.load(f) @@ -118,7 +113,7 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo net = R2U_Net(img_ch=1,output_ch=2) - netD2 = Discriminator(in_ch=2,out_ch=2) + netD2 = Discriminator(in_ch=2,out_ch=2) use_gpu = torch.cuda.is_available() @@ -166,8 +161,8 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo net_out = net(Variable(inputs)) net_out_sf = F.softmax(net_out,dim=1) - - + + ############################ # DISCRIMINATOR 2 TRAINING # ############################ @@ -178,39 +173,39 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo # Shuffling aling dim 1: {real,fake} OR {fake,real} d_in,shuffLabel = ch_shuffle(d_in) # D2 prediction - confr = netD2(Variable(d_in)).view(d_in.size(0),-1) + confr = netD2(Variable(d_in)).view(d_in.size(0),-1) # Compute loss LD2 = criterionD(confr,shuffLabel.float().cuda()) # Compute gradients LD2.backward() - # Backpropagate + # Backpropagate optimizerD2.step() # Appending loss for each batch into the list - D2_losses.append(LD2.item()) + D2_losses.append(LD2.item()) optimizerD2.zero_grad() d2_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) - d2_in, d2_lb = ch_shuffle(d2_in) + d2_in, d2_lb = ch_shuffle(d2_in) conffs2 = netD2(d2_in).view(d2_in.size(0),-1) LGadv2 = criterionD(conffs2,d2_lb.float().cuda()) # Aversarial loss 2 - - + + BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss - 0.001*LGadv2 + net_loss = BCE_Loss - 0.001*LGadv2 optimizer.zero_grad() - net_loss.backward() - + net_loss.backward() + optimizer.step() - + trainRunningLoss += net_loss.item() - + trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - trainDice_lungs += trainDice[0] - - trainBatches += 1 + trainDice_lungs += trainDice[0] + + trainBatches += 1 # if trainBatches>1: # # break @@ -218,7 +213,7 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo trainDiceCoeff_lungs.append(trainDice_lungs/trainBatches) print("\n{}][{}]| Net_loss: {:.4f} | BCE_Loss: {:.4f} |adv_loss: {:.4f}" - .format(epoch,epochs,net_loss.item(),BCE_Loss,LGadv2) ) + .format(epoch,epochs,net_loss.item(),BCE_Loss,LGadv2) ) with torch.no_grad(): for data1 in 
tq(validDataLoader): @@ -233,17 +228,17 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo net_out_sf = F.softmax(net_out.data,dim=1) - BCE_Loss = criterion(net_out[:,1],labels[:,1]) + BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss + net_loss = BCE_Loss val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - validDice_lungs += val_dice[0] + validDice_lungs += val_dice[0] validRunningLoss += net_loss.item() - validBatches += 1 + validBatches += 1 # if validBatches>1: - # break + # break validLoss.append(validRunningLoss/validBatches) validDiceCoeff_lungs.append(validDice_lungs/validBatches) @@ -253,7 +248,7 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo if (validDice_lungs.cpu() > bestValidDice_lungs): bestValidDice_lungs = validDice_lungs.cpu() torch.save(net.state_dict(), savePath+'sumnet_adv_best_lungs.pt') - + plot=plt.figure() plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') @@ -264,7 +259,7 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo plt.savefig(savePath+'LossPlot.png') plt.close() epochEnd = time.time()-epochStart - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' + print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) @@ -327,4 +322,3 @@ def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epo plt.ylabel('Dice coefficient') plt.savefig(savePath+'Dice_final.png') plt.close() - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py new file mode 100644 index 00000000000..6a2ef4fb5fe --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py @@ -0,0 +1,129 @@ +""" +Modified from Pytorch `MaxUnpool2d` module to support ONNX conversion. + +Source: https://github.com/pytorch/pytorch/issues/25088#issuecomment-1090956803 +""" + +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.nn.modules.pooling import _MaxUnpoolNd +from torch.nn.modules.utils import _pair + +class MaxUnpool2dop(Function): + """We warp the `torch.nn.functional.max_unpool2d` + with an extra `symbolic` method, which is needed while exporting to ONNX. + Users should not call this function directly. + """ + + @staticmethod + def forward(ctx, input, indices, kernel_size, stride, padding, + output_size): + """Forward function of MaxUnpool2dop. + Args: + input (Tensor): Tensor needed to upsample. + indices (Tensor): Indices output of the previous MaxPool. + kernel_size (Tuple): Size of the max pooling window. + stride (Tuple): Stride of the max pooling window. + padding (Tuple): Padding that was added to the input. + output_size (List or Tuple): The shape of output tensor. + Returns: + Tensor: Output tensor. 
+ """ + return F.max_unpool2d(input, indices, kernel_size, stride, padding, + output_size) + + @staticmethod + def symbolic(g, input, indices, kernel_size, stride, padding, output_size): + # get shape + input_shape = g.op('Shape', input) + const_0 = g.op('Constant', value_t=torch.tensor(0)) + const_1 = g.op('Constant', value_t=torch.tensor(1)) + batch_size = g.op('Gather', input_shape, const_0, axis_i=0) + channel = g.op('Gather', input_shape, const_1, axis_i=0) + + # height = (height - 1) * stride + kernel_size + height = g.op( + 'Gather', + input_shape, + g.op('Constant', value_t=torch.tensor(2)), + axis_i=0) + height = g.op('Sub', height, const_1) + height = g.op('Mul', height, + g.op('Constant', value_t=torch.tensor(stride[1]))) + height = g.op('Add', height, + g.op('Constant', value_t=torch.tensor(kernel_size[1]))) + + # width = (width - 1) * stride + kernel_size + width = g.op( + 'Gather', + input_shape, + g.op('Constant', value_t=torch.tensor(3)), + axis_i=0) + width = g.op('Sub', width, const_1) + width = g.op('Mul', width, + g.op('Constant', value_t=torch.tensor(stride[0]))) + width = g.op('Add', width, + g.op('Constant', value_t=torch.tensor(kernel_size[0]))) + + # step of channel + channel_step = g.op('Mul', height, width) + # step of batch + batch_step = g.op('Mul', channel_step, channel) + + # channel offset + range_channel = g.op('Range', const_0, channel, const_1) + range_channel = g.op( + 'Reshape', range_channel, + g.op('Constant', value_t=torch.tensor([1, -1, 1, 1]))) + range_channel = g.op('Mul', range_channel, channel_step) + range_channel = g.op('Cast', range_channel, to_i=7) # 7 is int64 + + # batch offset + range_batch = g.op('Range', const_0, batch_size, const_1) + range_batch = g.op( + 'Reshape', range_batch, + g.op('Constant', value_t=torch.tensor([-1, 1, 1, 1]))) + range_batch = g.op('Mul', range_batch, batch_step) + range_batch = g.op('Cast', range_batch, to_i=7) # 7 is int64 + + # update indices + indices = g.op('Add', indices, range_channel) + indices = g.op('Add', indices, range_batch) + + return g.op( + 'MaxUnpool', + input, + indices, + kernel_shape_i=kernel_size, + strides_i=stride) + + +class MaxUnpool2d(_MaxUnpoolNd): + """This module is modified from Pytorch `MaxUnpool2d` module. + Args: + kernel_size (int or tuple): Size of the max pooling window. + stride (int or tuple): Stride of the max pooling window. + Default: None (It is set to `kernel_size` by default). + padding (int or tuple): Padding that is added to the input. + Default: 0. + """ + + def __init__(self, kernel_size, stride=None, padding=0): + super(MaxUnpool2d, self).__init__() + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride or kernel_size) + self.padding = _pair(padding) + + def forward(self, input, indices, output_size=None): + """Forward function of MaxUnpool2d. + Args: + input (Tensor): Tensor needed to upsample. + indices (Tensor): Indices output of the previous MaxPool. + output_size (List or Tuple): The shape of output tensor. + Default: None. + Returns: + Tensor: Output tensor. 
+ """ + return MaxUnpool2dop.apply(input, indices, self.kernel_size, + self.stride, self.padding, output_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py index 0b9e4b62c9c..5a94fc089b5 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py @@ -1,27 +1,14 @@ - -#!/usr/bin/env python -# coding: utf-8 - -from __future__ import print_function, division - import torch -import torch.nn as nn -import torch.optim as optim +from torch import nn +from torch import optim from torch.optim import lr_scheduler from torch.utils import data -from torchvision import transforms -import torchvision -from torchvision import datasets, models, transforms -import torch.nn.functional as F from torch.autograd import Variable import numpy as np import matplotlib.pyplot as plt import time import os -import copy -from PIL import Image from tqdm import tqdm_notebook as tq -from sklearn.metrics import confusion_matrix from .data_loader import LungPatchDataLoader from .lenet import LeNet @@ -51,15 +38,9 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): trainDataLoader = data.DataLoader(trainDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) validDataLoader = data.DataLoader(valDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) - savePath = savepath if not os.path.isdir(savePath): - os.makedirs(savePath) - - trainDset = LungDataLoader(is_transform=True,split="train") - valDset = LungDataLoader(is_transform=True,split="valid") - trainDataLoader = data.DataLoader(trainDset,batch_size=32,shuffle=True,num_workers=4,pin_memory=True) - validDataLoader = data.DataLoader(valDset,batch_size=32,shuffle=False,num_workers=4,pin_memory=True) + os.makedirs(savePath) net = LeNet() @@ -84,12 +65,12 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): trainRunningLoss = 0 validRunningLoss = 0 trainRunningCorrects = 0 - validRunningCorrects = 0 + validRunningCorrects = 0 trainBatches = 0 validBatches = 0 - + net.train(True) - + for data1 in tq(trainDataLoader): img, label = data1 if use_gpu: @@ -97,7 +78,7 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): label = label.cuda() net_out = net(Variable(inputs)) - + net_loss = criterion(net_out,label.float()) preds = torch.zeros(net_out.shape).cuda() preds[net_out > 0.5] = 1 @@ -105,10 +86,10 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): optimizer.zero_grad() - net_loss.backward() - + net_loss.backward() + optimizer.step() - + trainRunningLoss += net_loss.item() for i in range(len(preds[:,0])): if preds[:,0][i] == label[:,0][i].float(): @@ -116,15 +97,14 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): trainBatches += 1 # if trainBatches>1: - # break + # break trainepoch_loss = trainRunningLoss/trainBatches trainepoch_acc = 100*(int(trainRunningCorrects)/32594) trainLoss.append(trainepoch_loss) trainAcc.append(trainepoch_acc) - - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} |Train running : {:.5f}| Train acc: {:.5f} ' - .format(epoch+1, epochs, trainepoch_loss,trainRunningCorrects,trainepoch_acc)) + + print(f'Epoch: {epoch+1}/{epochs}, Train Loss:{trainepoch_loss}, Train acc:{trainepoch_acc}') with torch.no_grad(): for data1 in tq(validDataLoader): @@ -148,23 +128,22 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): validBatches += 1 
# if validBatches>10: - # break + # break validepoch_loss = validRunningLoss/validBatches validepoch_acc = 100*(int(validRunningCorrects)/3666) validLoss.append(validepoch_loss) validAcc.append(validepoch_acc) - print('{:.0f} Loss: {:.4f} | accuracy: {:.4f} '.format( - epoch, validepoch_loss,validepoch_acc)) - - if (validepoch_acc > bestValidAcc): + print(f'Epoch: {epoch} Loss: {validepoch_loss} | accuracy: {validepoch_acc}') + + if validepoch_acc > bestValidAcc: bestValidAcc = validepoch_acc torch.save(net.state_dict(), savePath+'lenet_best.pt') - + scheduler.step(validepoch_loss) - - plot=plt.figure() + + plt.figure() plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') plt.xlabel('Epochs') @@ -174,19 +153,16 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): plt.savefig(savePath+'LossPlot.png') plt.close() epochEnd = time.time()-epochStart - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' - .format(epoch+1, epochs, trainepoch_loss, validepoch_loss)) - print('Accuracy | Train_acc {:.5f} | Valid_acc {:.5f} |' - .format(trainepoch_acc,validepoch_acc)) - + print(f'Epoch: {epoch+1}/{epochs} | Train Loss: {trainepoch_loss} | Valid Loss: {validepoch_loss}') + print('Accuracy | Train_acc {trainepoch_acc} | Valid_acc {validepoch_acc} |') - print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) + print(f'Time: {epochEnd//60}m {epochEnd%60}s') trainLoss_np = np.array(trainLoss) validLoss_np = np.array(validLoss) trainAcc_np = np.array(trainAcc) validAcc_np = np.array(validAcc) - print('Saving losses') + print(f'Saving losses') torch.save(trainLoss_np, savePath+'trainLoss.pt') torch.save(validLoss_np, savePath+'validLoss.pt') @@ -197,7 +173,7 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): # break end = time.time()-start - print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + print(f'Training completed in: {end//60}m {end%60}s') plt.figure() plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') @@ -208,7 +184,6 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): plt.savefig(savePath+'trainLossFinal.png') plt.close() - plt.figure() plt.plot(range(len(trainAcc)),trainAcc,'-r',label='Train') plt.plot(range(len(validAcc)),validAcc,'-g',label='Valid') @@ -217,4 +192,4 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): plt.ylabel('Accuracy') plt.title('Accuracy Plot') plt.savefig(savePath+'acc_plot.png') - plt.close() \ No newline at end of file + plt.close() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py index 88164e15411..8c6c6e0cb68 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py @@ -1,6 +1,5 @@ import torch -import torch.nn as nn -import torch.nn.functional as F +from torch import nn from torch.nn import init def init_weights(net, init_type='normal', gain=0.02): @@ -16,19 +15,19 @@ def init_func(m): elif init_type == 'orthogonal': init.orthogonal_(m.weight.data, gain=gain) else: - raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + raise NotImplementedError(f'initialization method {init_type} is not implemented') if hasattr(m, 'bias') and m.bias is not None: init.constant_(m.bias.data, 0.0) elif 
classname.find('BatchNorm2d') != -1: init.normal_(m.weight.data, 1.0, gain) init.constant_(m.bias.data, 0.0) - print('initialize network with %s' % init_type) + print(f'initialize network with {init_type}') net.apply(init_func) class conv_block(nn.Module): def __init__(self,ch_in,ch_out): - super(conv_block,self).__init__() + super().__init__() self.conv = nn.Sequential( nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), nn.BatchNorm2d(ch_out), @@ -45,7 +44,7 @@ def forward(self,x): class up_conv(nn.Module): def __init__(self,ch_in,ch_out): - super(up_conv,self).__init__() + super().__init__() self.up = nn.Sequential( nn.Upsample(scale_factor=2), nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), @@ -59,7 +58,7 @@ def forward(self,x): class Recurrent_block(nn.Module): def __init__(self,ch_out,t=2): - super(Recurrent_block,self).__init__() + super().__init__() self.t = t self.ch_out = ch_out self.conv = nn.Sequential( @@ -73,13 +72,13 @@ def forward(self,x): if i==0: x1 = self.conv(x) - + x1 = self.conv(x+x1) return x1 - + class RRCNN_block(nn.Module): def __init__(self,ch_in,ch_out,t=2): - super(RRCNN_block,self).__init__() + super().__init__() self.RCNN = nn.Sequential( Recurrent_block(ch_out,t=t), Recurrent_block(ch_out,t=t) @@ -94,7 +93,7 @@ def forward(self,x): class single_conv(nn.Module): def __init__(self,ch_in,ch_out): - super(single_conv,self).__init__() + super().__init__() self.conv = nn.Sequential( nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), nn.BatchNorm2d(ch_out), @@ -107,12 +106,12 @@ def forward(self,x): class Attention_block(nn.Module): def __init__(self,F_g,F_l,F_int): - super(Attention_block,self).__init__() + super().__init__() self.W_g = nn.Sequential( nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True), nn.BatchNorm2d(F_int) ) - + self.W_x = nn.Sequential( nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True), nn.BatchNorm2d(F_int) @@ -123,9 +122,9 @@ def __init__(self,F_g,F_l,F_int): nn.BatchNorm2d(1), nn.Sigmoid() ) - + self.relu = nn.ReLU(inplace=True) - + def forward(self,g,x): g1 = self.W_g(g) x1 = self.W_x(x) @@ -137,10 +136,9 @@ def forward(self,g,x): class U_Net(nn.Module): def __init__(self,img_ch=3,output_ch=1): - super(U_Net,self).__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + super().__init__() + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) self.Conv2 = conv_block(ch_in=64,ch_out=128) self.Conv3 = conv_block(ch_in=128,ch_out=256) @@ -152,10 +150,10 @@ def __init__(self,img_ch=3,output_ch=1): self.Up4 = up_conv(ch_in=512,ch_out=256) self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - + self.Up3 = up_conv(ch_in=256,ch_out=128) self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - + self.Up2 = up_conv(ch_in=128,ch_out=64) self.Up_conv2 = conv_block(ch_in=128, ch_out=64) @@ -168,7 +166,7 @@ def forward(self,x): x2 = self.Maxpool(x1) x2 = self.Conv2(x2) - + x3 = self.Maxpool(x2) x3 = self.Conv3(x3) @@ -181,9 +179,9 @@ def forward(self,x): # decoding + concat path d5 = self.Up5(x5) d5 = torch.cat((x4,d5),dim=1) - + d5 = self.Up_conv5(d5) - + d4 = self.Up4(d5) d4 = torch.cat((x3,d4),dim=1) d4 = self.Up_conv4(d4) @@ -203,31 +201,31 @@ def forward(self,x): class R2U_Net(nn.Module): def __init__(self,img_ch=3,output_ch=1,t=2): - super(R2U_Net,self).__init__() - + super().__init__() + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) self.Upsample = nn.Upsample(scale_factor=2) self.RRCNN1 = 
RRCNN_block(ch_in=img_ch,ch_out=64,t=t) self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - + self.Up5 = up_conv(ch_in=1024,ch_out=512) self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - + self.Up4 = up_conv(ch_in=512,ch_out=256) self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - + self.Up3 = up_conv(ch_in=256,ch_out=128) self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - + self.Up2 = up_conv(ch_in=128,ch_out=64) self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) @@ -240,7 +238,7 @@ def forward(self,x): x2 = self.Maxpool(x1) x2 = self.RRCNN2(x2) - + x3 = self.Maxpool(x2) x3 = self.RRCNN3(x3) @@ -254,7 +252,7 @@ def forward(self,x): d5 = self.Up5(x5) d5 = torch.cat((x4,d5),dim=1) d5 = self.Up_RRCNN5(d5) - + d4 = self.Up4(d5) d4 = torch.cat((x3,d4),dim=1) d4 = self.Up_RRCNN4(d4) @@ -275,8 +273,8 @@ def forward(self,x): class AttU_Net(nn.Module): def __init__(self,img_ch=3,output_ch=1): - super(AttU_Net,self).__init__() - + super().__init__() + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) @@ -292,11 +290,11 @@ def __init__(self,img_ch=3,output_ch=1): self.Up4 = up_conv(ch_in=512,ch_out=256) self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - + self.Up3 = up_conv(ch_in=256,ch_out=128) self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - + self.Up2 = up_conv(ch_in=128,ch_out=64) self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) self.Up_conv2 = conv_block(ch_in=128, ch_out=64) @@ -310,7 +308,7 @@ def forward(self,x): x2 = self.Maxpool(x1) x2 = self.Conv2(x2) - + x3 = self.Maxpool(x2) x3 = self.Conv3(x3) @@ -323,9 +321,9 @@ def forward(self,x): # decoding + concat path d5 = self.Up5(x5) x4 = self.Att5(g=d5,x=x4) - d5 = torch.cat((x4,d5),dim=1) + d5 = torch.cat((x4,d5),dim=1) d5 = self.Up_conv5(d5) - + d4 = self.Up4(d5) x3 = self.Att4(g=d4,x=x3) d4 = torch.cat((x3,d4),dim=1) @@ -348,34 +346,34 @@ def forward(self,x): class R2AttU_Net(nn.Module): def __init__(self,img_ch=3,output_ch=1,t=2): - super(R2AttU_Net,self).__init__() - + super().__init__() + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) self.Upsample = nn.Upsample(scale_factor=2) self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - + self.Up5 = up_conv(ch_in=1024,ch_out=512) self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - + self.Up4 = up_conv(ch_in=512,ch_out=256) self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - + self.Up3 = up_conv(ch_in=256,ch_out=128) self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - + self.Up2 = up_conv(ch_in=128,ch_out=64) self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) @@ -389,7 +387,7 @@ def forward(self,x): x2 = self.Maxpool(x1) x2 = self.RRCNN2(x2) - + x3 = self.Maxpool(x2) x3 = self.RRCNN3(x3) @@ -404,7 +402,7 @@ def forward(self,x): x4 = self.Att5(g=d5,x=x4) d5 = torch.cat((x4,d5),dim=1) 
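All the encoder-decoder variants in this file expose the same constructor arguments and forward signature. A minimal shape check for the configuration used by the lung segmentation stage (1 input channel, 2 output classes, 512 x 512 slices) could look like the sketch below; the import path is an assumption:

import torch
from src.utils.r2unet import U_Net, R2U_Net  # assumed import path, matching this file

net = U_Net(img_ch=1, output_ch=2).eval()  # same arguments the training code passes
ct_slice = torch.randn(1, 1, 512, 512)     # one single-channel CT slice
with torch.no_grad():
    out = net(ct_slice)
assert out.shape == (1, 2, 512, 512)       # per-pixel logits for the two classes
# R2U_Net(img_ch=1, output_ch=2) and the attention variants accept the same input.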
d5 = self.Up_RRCNN5(d5) - + d4 = self.Up4(d5) x3 = self.Att4(g=d4,x=x3) d4 = torch.cat((x3,d4),dim=1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py index e2fa72290b2..bbcee4d0805 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py @@ -1,86 +1,80 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Nov 6 22:57:38 2018 - -@author: sumanthnandamuri -""" import torch -import torch.nn as nn +from torch import nn import torch.nn.functional as F from torchvision import models +from .max_unpool_2d import MaxUnpool2d class SUMNet(nn.Module): - def __init__(self,in_ch,out_ch): - super(SUMNet, self).__init__() - - self.encoder = models.vgg11_bn(pretrained = True).features - self.preconv = nn.Conv2d(in_ch, 3, 1) - self.conv1 = self.encoder[0] - self.bn1 = self.encoder[1] - self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv2 = self.encoder[4] - self.bn2 = self.encoder[5] - self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv3a = self.encoder[8] - self.bn3 = self.encoder[9] - self.conv3b = self.encoder[11] - self.bn4 = self.encoder[12] - self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv4a = self.encoder[15] - self.bn5 = self.encoder[16] - self.conv4b = self.encoder[18] - self.bn6 = self.encoder[19] - self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv5a = self.encoder[22] - self.bn7 = self.encoder[23] - self.conv5b = self.encoder[25] - self.bn8 = self.encoder[26] - self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) - - self.unpool5 = nn.MaxUnpool2d(2, 2) - self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) - self.unpool4 = nn.MaxUnpool2d(2, 2) - self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) - self.unpool3 = nn.MaxUnpool2d(2, 2) - self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) - self.donv3a = nn.Conv2d(256,128, 3, padding = 1) - self.unpool2 = nn.MaxUnpool2d(2, 2) - self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) - self.unpool1 = nn.MaxUnpool2d(2, 2) - self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) - self.output = nn.Conv2d(32, out_ch, 1) - - def forward(self, x): - preconv = F.relu(self.preconv(x), inplace = True) - conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) - pool1, idxs1 = self.pool1(conv1) - conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) - pool2, idxs2 = self.pool2(conv2) - conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) - conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) - pool3, idxs3 = self.pool3(conv3b) - conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) - conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) - pool4, idxs4 = self.pool4(conv4b) - conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) - conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) - pool5, idxs5 = self.pool5(conv5b) - - unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) - donv5b = F.relu(self.donv5b(unpool5), inplace = True) - donv5a = F.relu(self.donv5a(donv5b), inplace = True) - unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) - donv4b = F.relu(self.donv4b(unpool4), inplace = True) - donv4a = F.relu(self.donv4a(donv4b), inplace = True) - unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 
1) - donv3b = F.relu(self.donv3b(unpool3), inplace = True) - donv3a = F.relu(self.donv3a(donv3b)) - unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) - donv2 = F.relu(self.donv2(unpool2), inplace = True) - unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) - donv1 = F.relu(self.donv1(unpool1), inplace = True) - output = self.output(donv1) - return output \ No newline at end of file + def __init__(self,in_ch,out_ch): + super().__init__() + + self.encoder = models.vgg11_bn(pretrained = True).features + self.preconv = nn.Conv2d(in_ch, 3, 1) + self.conv1 = self.encoder[0] + self.bn1 = self.encoder[1] + self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv2 = self.encoder[4] + self.bn2 = self.encoder[5] + self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv3a = self.encoder[8] + self.bn3 = self.encoder[9] + self.conv3b = self.encoder[11] + self.bn4 = self.encoder[12] + self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv4a = self.encoder[15] + self.bn5 = self.encoder[16] + self.conv4b = self.encoder[18] + self.bn6 = self.encoder[19] + self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv5a = self.encoder[22] + self.bn7 = self.encoder[23] + self.conv5b = self.encoder[25] + self.bn8 = self.encoder[26] + self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) + + self.unpool5 = MaxUnpool2d(2, 2) + self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) + self.unpool4 = MaxUnpool2d(2, 2) + self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) + self.unpool3 = MaxUnpool2d(2, 2) + self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) + self.donv3a = nn.Conv2d(256,128, 3, padding = 1) + self.unpool2 = MaxUnpool2d(2, 2) + self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) + self.unpool1 = MaxUnpool2d(2, 2) + self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) + self.output = nn.Conv2d(32, out_ch, 1) + + def forward(self, x): + preconv = F.relu(self.preconv(x), inplace = True) + conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) + pool1, idxs1 = self.pool1(conv1) + conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) + pool2, idxs2 = self.pool2(conv2) + conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) + conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) + pool3, idxs3 = self.pool3(conv3b) + conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) + conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) + pool4, idxs4 = self.pool4(conv4b) + conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) + conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) + pool5, idxs5 = self.pool5(conv5b) + + unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) + donv5b = F.relu(self.donv5b(unpool5), inplace = True) + donv5a = F.relu(self.donv5a(donv5b), inplace = True) + unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) + donv4b = F.relu(self.donv4b(unpool4), inplace = True) + donv4a = F.relu(self.donv4a(donv4b), inplace = True) + unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) + donv3b = F.relu(self.donv3b(unpool3), inplace = True) + donv3a = F.relu(self.donv3a(donv3b)) + unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) + donv2 = F.relu(self.donv2(unpool2), inplace = True) + unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) + donv1 = F.relu(self.donv1(unpool1), inplace = True) + output = self.output(donv1) + return output diff --git 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index 3f89a37a4fd..f1a8487ea48 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -1,6 +1,4 @@ -import numpy as np import torch -from torch.utils import data def dice_coefficient(pred1, target): smooth = 1e-15 @@ -16,13 +14,12 @@ def dice_coefficient(pred1, target): m2_1 = target_1_hot[:,1,:,:].view(num, -1).float() m1_2 = pred_1_hot[:,2,:,:].view(num, -1).float() m2_2 = target_1_hot[:,2,:,:].view(num, -1).float() - + intersection_1 = (m1_1*m2_1).sum(1) intersection_2 = (m1_2*m2_2).sum(1) union_1 = (m1_1+m2_1).sum(1) + smooth - intersection_1 union_2 = (m1_2+m2_2).sum(1) + smooth - intersection_2 score_1 = intersection_1/union_1 - score_2 = intersection_2/union_2 return [score_1.mean()] \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py index e25d99fd20e..1625d043d84 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py @@ -1,46 +1,36 @@ -#!/usr/bin/env python -# coding: utf-8 - -import json import numpy as np -import os -import SimpleITK as sitk import matplotlib.pyplot as plt def visualize_data(series_uid,slice_num,datapath,savepath): - """ To visualize the image and nodule masks of the dataset - - Parameters - ---------- - - series_uid: str - Series_instance_uid or filename of the image to visualize - slice_num: int - Slice number to visulaize - datapath: str - Folder location where image and mask numpy is stored. - savepath: str - Folder location to save images - - - """ - - img_name = series_uid+'_slice'+str(slice_num)+'.npy' - mask = np.load(datapath+'mask/'+img_name) - img = np.load(datapath+'img/'+img_name) - lungseg = np.load(datapath+'lungseg/'+img_name) - - plt.figure() - plt.subplot(131) - plt.imshow(img,cmap='gray') - plt.title('Original Image') - plt.subplot(132) - plt.imshow(mask,cmap='gray') - plt.title('Ground truth (Lung)') - plt.subplot(133) - plt.imshow(mask,cmap='gray') - plt.title('Ground truth (Nodule)') - plt.savefig(savepath+'visualization.png') - plt.show() - plt.close() - + """ To visualize the image and nodule masks of the dataset + + Parameters + ---------- + + series_uid: str + Series_instance_uid or filename of the image to visualize + slice_num: int + Slice number to visulaize + datapath: str + Folder location where image and mask numpy is stored. 
+ savepath: str + Folder location to save images + """ + + img_name = series_uid+'_slice'+str(slice_num)+'.npy' + mask = np.load(datapath+'mask/'+img_name) + img = np.load(datapath+'img/'+img_name) + + plt.figure() + plt.subplot(131) + plt.imshow(img,cmap='gray') + plt.title('Original Image') + plt.subplot(132) + plt.imshow(mask,cmap='gray') + plt.title('Ground truth (Lung)') + plt.subplot(133) + plt.imshow(mask,cmap='gray') + plt.title('Ground truth (Nodule)') + plt.savefig(savepath+'visualization.png') + plt.show() + plt.close() From beaab80155114afd8dbd4fb19d3505194e11d568 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Tue, 10 Jan 2023 00:34:04 +0530 Subject: [PATCH 04/47] updates to codes updated gitignore Added download script, plot_diagram, removed code repetition --- .../lung_nodule_detection/.gitignore | 3 +- .../configs/download_config.json | 20 +- .../configs/network_config.json | 4 - .../configs/stage1_config.json | 24 ++ .../configs/stage2_config.json | 17 + .../lung_nodule_detection/requirements.txt | 4 +- .../lung_nodule_detection/src/__init__.py | 0 .../lung_nodule_detection/src/export.py | 32 ++ .../src/train_network.py | 24 +- .../src/utils/data_loader.py | 2 - .../src/utils/discriminator.py | 45 +++ .../src/utils/downloader.py | 31 ++ .../src/utils/exporter.py | 47 +++ .../src/utils/generate_slices.py | 2 - .../src/utils/get_config.py | 23 ++ .../src/utils/infer_patch_classifier.py | 8 +- .../src/utils/lung_seg.py | 125 +++---- .../src/utils/lung_seg_adv.py | 324 ------------------ .../src/utils/patch_classifier.py | 75 ++-- .../lung_nodule_detection/src/utils/utils.py | 38 ++ 20 files changed, 360 insertions(+), 488 deletions(-) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/export.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/.gitignore b/misc/pytorch_toolkit/lung_nodule_detection/.gitignore index 6a01be53f44..1981a34898e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/.gitignore +++ b/misc/pytorch_toolkit/lung_nodule_detection/.gitignore @@ -1,3 +1,4 @@ model_weights/* test_data/* -temp_data/* \ No newline at end of file +temp_data/* +downloads/* diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json index 0c485aef63c..513b6f66dc2 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/download_config.json @@ -1,18 +1,10 @@ { - "stage1": { - "dest_path_data": "", - "url_data": "", - "url_model": "", - "dest_path_model": "" - }, - "stage2": { - "dest_path_data": "", - "url_data": "", - "url_model": "", - "dest_path_model": "" + "model_weights": { + "url_model": 
"http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi11/model_weights.zip", + "dest_path_model": "downloads/model_weights.zip" }, "test_data": { - "dest_path": "", - "drive_url": " " + "dest_path_data": "downloads/test_data.zip", + "url_data": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi11/test_data.zip" } -} \ No newline at end of file +} diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json deleted file mode 100644 index cafb1ab4589..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/network_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{"train":{}, -"inference":{}, -"export": {} -} \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json new file mode 100644 index 00000000000..684b107da13 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json @@ -0,0 +1,24 @@ +{"train":{ + "fold_no": 4, + "save_path": "temp_data/stage1/", + "json_path": "downloads/test_data/fold4_pos_neg_eq.json", + "datapath": "downloads/test_data/stage1/", + "lung_segpath": "temp_data/stage1/", + "network": "sumnet", + "epochs": 5, + "lrate": 1e-4, + "adv": true +}, +"inference":{ + "fold_no": 4, + "save_path": "temp_data/stage1/", + "json_path": "downloads/test_data/fold4_pos_neg_eq.json", + "network": "sumnet" +}, +"export":{ + "checkpoint": "downloads/model_weights/lung_seg_adv_best.pth", + "input_shape":[1, 1, 512, 512], + "model_name_onnx": "lung_seg.onnx", + "model_name":"lung_seg" +} +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json new file mode 100644 index 00000000000..d5a9c5eabe5 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json @@ -0,0 +1,17 @@ +{"train":{ + "savepath": "temp_data/stage2/", + "imgpath": "test_data/stage2/", + "lrate": 1e-4, + "epochs": 5 +}, +"inference":{ + "modelpath":"temp_data/stage2/", + "imgpath":"test_data/stage2/" +}, +"export":{ + "checkpoint": "downloads/model_weights/patch_class_best.pth", + "input_shape":[1, 1, 64, 64], + "model_name_onnx": "patch_class.onnx", + "model_name":"patch_class" +} +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt index cabd5f67950..666be8bf153 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt +++ b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt @@ -2,8 +2,8 @@ torch torchvision torchmetrics numpy -openvino-dev[onnx]==2021.4.2 -onnxruntime==1.8.1 +openvino-dev[onnx] +onnxruntime wget tqdm pytest diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/export.py b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py new file mode 100644 index 00000000000..e3e7db2103a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py @@ -0,0 +1,32 @@ +from utils.exporter import Exporter +import argparse +from utils.get_config import get_config + +def export(args): + export_config = get_config(action='export', phase=args.phase) + exporter = 
Exporter(export_config, args.optimised) + + if args.onnx: + exporter.export_model_onnx() + if args.ir: + exporter.export_model_ir() + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--onnx", + required=False, + help="Set to True, if you wish to export onnx model", + default=False, + action='store_true') + parser.add_argument("--ir", + required=False, + help="Set to True, if you wish to export IR", + default=False, + action='store_true') + parser.add_argument('-ph', '--phase', type=int, + required=True, default=1, help='Phase') + + custom_args = parser.parse_args() + + export(custom_args) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py index 9e1d63507c9..853d6faab9e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py @@ -5,7 +5,7 @@ def main(args): - if args.lungseg: + if args.lungseg or args.lungsegadv: foldno = args.foldno savepath = args.savepath jsonpath = args.jsonpath @@ -13,22 +13,16 @@ def main(args): lungsegpath = args.lungmask network = args.network if args.epochs: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) - + if args.lungsegadv: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs,adv=True) + else: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) else: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) + if args.lungsegadv: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs,adv=True) + else: + lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) - elif args.lungsegadv: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - datapath = args.datapath - lungsegpath = args.lungmask - network = args.network - if args.epochs: - lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) - else: - lung_seg_adv.train_advnetwork(foldno,savepath,jsonpath,datapath,lungsegpath,network) else: savepath = args.savepath imgpath = args.datapath diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index 3b443b2248c..463e1883ae3 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -99,6 +99,4 @@ def __getitem__(self,index): def transform(self,img): img = torch.Tensor(img).unsqueeze(0) img = img.type(torch.FloatTensor) - - return img diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py new file mode 100644 index 00000000000..bbc71b1ca91 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py @@ -0,0 +1,45 @@ +import torch +from torch import nn + +class Discriminator(nn.Module): + def __init__(self,in_ch, out_ch): + super(Discriminator, self).__init__() + self.main = nn.Sequential( + # input is (nc) x 64 x 64 + nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + # state size. 
(64) x 32 x 32 + nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + # state size. (64*2) x 16 x 16 + nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(2), + # state size. (64*4) x 8 x 8 + nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.Sigmoid() + ) + def forward(self, input): + output = self.main(input) + return output.view(-1, 2) #.squeeze(1) + +def ch_shuffle(x): + shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) + shuffIdx2 = 1-shuffIdx1 + d_in = torch.Tensor(x.size()).cuda() + d_in[:,shuffIdx1] = x[:,0] + d_in[:,shuffIdx2] = x[:,1] + shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) + return d_in, shuffLabel diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py new file mode 100644 index 00000000000..213c80e733c --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py @@ -0,0 +1,31 @@ +from .get_config import get_config +import os +import zipfile +import wget + +def download_and_extract(path, url, expath): + wget.download(url, path) + with zipfile.ZipFile(path, 'r') as zip_ref: + zip_ref.extractall(expath) + +def download_checkpoint(): + config = get_config(action='download', config_path='configs/') + if not os.path.exists('downloads/model_weights/'): + os.makedirs('downloads/model_weights/') + model_url = config['model_weights']['url_model'] + model_path = config['model_weights']['dest_path_model'] + + download_and_extract(path=model_path, url=model_url, + expath='downloads/model_weights/') + +def download_data(): + config = get_config(action='download', config_path='configs/') + if not os.path.exists('downloads/test_data/'): + os.makedirs('downloads/test_data/') + data_url = config['test_data']['url_data'] + data_path = config['test_data']['dest_path_data'] + download_and_extract(path=data_path, url=data_url, expath='downloads/test_data/') + +if __name__ == '__main__': + download_data() + download_checkpoint() \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py new file mode 100644 index 00000000000..4c0b334cdc5 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py @@ -0,0 +1,47 @@ +import torch +import os +from .utils import load_model +from .utils import load_checkpoint + +class Exporter: + def __init__(self, config, stage): + self.config = config + self.checkpoint = config.get('checkpoint') + self.stage = stage + + self.model = load_model(network=config["network"]) + + self.model.eval() + load_checkpoint(self.model, self.checkpoint) + + def export_model_ir(self): + input_model = os.path.join( + os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) + input_shape = self.config.get('input_shape') + output_dir = os.path.split(self.checkpoint)[0] + export_command = f"""mo \ + --framework onnx \ + --input_model {input_model} \ + --input_shape "{input_shape}" \ + --output_dir {output_dir}""" + + if self.config.get('verbose_export'): + 
print(export_command) + os.system(export_command) + + def export_model_onnx(self): + + print(f"Saving model to {self.config.get('model_name_onnx')}") + res_path = os.path.join(os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) + + if self.stage == 2: + dummy_input = torch.randn(1, 1, 64, 64) + else: + dummy_input = torch.randn(1, 1, 512, 512) + + torch.onnx.export(self.model, dummy_input, res_path, + opset_version=11, do_constant_folding=True, + input_names=['input'], output_names=['output'], + dynamic_axes={'input': {0: 'batch_size'}, + 'output': {0: 'batch_size'}}, + verbose=False) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py index 703523c8365..325dc3dc6d8 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py @@ -90,8 +90,6 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): nods = scan.cluster_annotations() #Function used to determine which annotation belongs to which nodule - - nodule_dict={} #Dict to store number of contour markings for that nodule slice_list=[] # List to store the slices which has nodules marked points_dictx={} # These dicts are to store the points to be plotted (key=slice_index, ) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py new file mode 100644 index 00000000000..063b5fe1148 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py @@ -0,0 +1,23 @@ +import os +import json + +def get_config(action, stage=1, config_path=""): + + root_path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + config_path = os.path.join(root_path, 'configs') + + if action == 'download': + with open(os.path.join(config_path, 'download_config.json')) as f1: + config = json.load(f1) + else: + if stage == 1: + with open(os.path.join(config_path, 'stage1_config.json')) as f1: + config_file = json.load(f1) + config = config_file[action] + else: + with open(os.path.join(config_path, 'stage2_config.json')) as f1: + config_file = json.load(f1) + config = config_file[action] + + return config diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py index f2263e1ef4b..951aaae99b2 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py @@ -42,7 +42,7 @@ def lungpatch_classifier(modelpath,imgpath): criterion = nn.BCEWithLogitsLoss() scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=5, verbose=True) testRunningCorrects = 0 - testRunningLoss = 0 + testRunningLoss = 0 testBatches = 0 pred_arr = [] label_arr = [] @@ -62,16 +62,14 @@ def lungpatch_classifier(modelpath,imgpath): testRunningLoss += net_loss.item() testRunningCorrects += torch.sum(preds == label.data.float()) - + for i,j in zip(preds.cpu().numpy(),label.cpu().numpy()): pred_arr.append(i) label_arr.append(j) testBatches += 1 # if testBatches>0: - # break - - + # break testepoch_loss = testRunningLoss/testBatches testepoch_acc = 100*(int(testRunningCorrects)/len(pred_arr)) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py index e031234c413..26e263866fa 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py @@ -14,10 +14,11 @@ from .r2unet import U_Net import json from .data_loader import LungDataLoader -from .utils import dice_coefficient +from .utils import dice_coefficient, plot_graphs +from .discriminator import Discriminator, ch_shuffle plt.switch_backend('agg') -def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4): +def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4,adv=False): """Training function for SUMNet,UNet,R2Unet Parameters @@ -46,9 +47,9 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc """ fold = 'fold'+str(fold_no) - savePath = save_path+'/'+network+'/'+fold+'/' - if not os.path.isdir(savePath): - os.makedirs(savePath) + save_path = save_path+'/'+network+'/'+fold+'/' + if not os.path.isdir(save_path): + os.makedirs(save_path) with open(json_path+fold+'_pos_neg_eq.json') as f: json_file = json.load(f) @@ -73,8 +74,14 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc if use_gpu: net = net.cuda() - optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + if adv: + netD2 = Discriminator(in_ch=2,out_ch=2) + if use_gpu: + netD2 = netD2.cuda() + optimizerD2 = optim.Adam(netD2.parameters(), lr = 1e-4, weight_decay = 1e-5) + criterionD = nn.BCELoss() + D2_losses = [] criterion = nn.BCEWithLogitsLoss() @@ -113,16 +120,33 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc net_out_sf = F.softmax(net_out,dim=1) - BCE_Loss = criterion(net_out[:,1],labels[:,1]) + if adv: + optimizerD2.zero_grad() + # Concatenate real (GT) and fake (segmented) samples along dim 1 + d_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + # Shuffling aling dim 1: {real,fake} OR {fake,real} + d_in,shuffLabel = ch_shuffle(d_in) + # D2 prediction + confr = netD2(Variable(d_in)).view(d_in.size(0),-1) + # Compute loss + LD2 = criterionD(confr,shuffLabel.float().cuda()) + # Compute gradients + LD2.backward() + # Backpropagate + optimizerD2.step() + # Appending loss for each batch into the list + D2_losses.append(LD2.item()) + optimizerD2.zero_grad() + d2_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + d2_in, d2_lb = ch_shuffle(d2_in) + conffs2 = netD2(d2_in).view(d2_in.size(0),-1) + LGadv2 = criterionD(conffs2,d2_lb.float().cuda()) # Aversarial loss 2 + BCE_Loss = criterion(net_out[:,1],labels[:,1]) net_loss = BCE_Loss - optimizer.zero_grad() - net_loss.backward() - optimizer.step() - trainRunningLoss += net_loss.item() trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) @@ -149,10 +173,8 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss - val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) validDice_lungs += val_dice[0] validRunningLoss += net_loss.item() @@ -163,21 +185,14 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc validLoss.append(validRunningLoss/validBatches) validDiceCoeff_lungs.append(validDice_lungs/validBatches) - - - if (validDice_lungs.cpu() > bestValidDice_lungs): + if validDice_lungs.cpu() > bestValidDice_lungs: 
bestValidDice_lungs = validDice_lungs.cpu() - torch.save(net.state_dict(), savePath+'sumnet_best_lungs.pt') - - plot=plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - if epoch==0: - plt.legend() - plt.savefig(savePath+'LossPlot.png') - plt.close() + torch.save(net.state_dict(), save_path+'sumnet_best_lungs.pt') + + plot_graphs(train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Running Loss', save_name='LossPlot.png') + epochEnd = time.time()-epochStart print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) @@ -185,18 +200,13 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) - trainLoss_np = np.array(trainLoss) - validLoss_np = np.array(validLoss) - trainDiceCoeff_lungs_np = np.array(trainDiceCoeff_lungs) - validDiceCoeff_lungs_np = np.array(validDiceCoeff_lungs) - print('Saving losses') - torch.save(trainLoss_np, savePath+'trainLoss.pt') - torch.save(validLoss_np, savePath+'validLoss.pt') - torch.save(trainDiceCoeff_lungs_np, savePath+'trainDice_lungs.pt') - torch.save(validDiceCoeff_lungs_np, savePath+'validDice_lungs.pt') + torch.save(trainLoss, save_path+'trainLoss.pt') + torch.save(validLoss, save_path+'validLoss.pt') + torch.save(trainDiceCoeff_lungs, save_path+'trainDice_lungs.pt') + torch.save(validDiceCoeff_lungs, save_path+'validDice_lungs.pt') # if epoch>1: # break @@ -204,41 +214,10 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc end = time.time()-start print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + plot_graphs(train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='LossPlotFinal.png') - plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - plt.title('Loss plot') - plt.legend() - plt.savefig(savePath+'trainLossFinal.png') - plt.close() - - - plt.figure() - plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Lungs') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.title('Dice coefficient: Train') - plt.savefig(savePath+'trainDice.png') - plt.close() - - plt.figure() - plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Lungs') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.title('Dice coefficient: Valid') - plt.savefig(savePath+'validDice.png') - plt.close() - - plt.figure() - plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Train') - plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Valid') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.savefig(savePath+'Dice_final.png') - plt.close() + plot_graphs(train_values=trainDiceCoeff_lungs, valid_values=validDiceCoeff_lungs, + save_path=save_path, x_label='Epochs', y_label='Dice coefficient', + plot_title='Dice coefficient', save_name='Dice_Plot.png') diff --git 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py deleted file mode 100644 index 7b03512becb..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg_adv.py +++ /dev/null @@ -1,324 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from torch import optim -from tqdm import tqdm as tq -import time -from torch.utils import data -import os -import torch.nn.functional as F -from torch.autograd import Variable -import matplotlib.pyplot as plt -from .sumnet_bn_vgg import SUMNet -from .r2unet import U_Net -from .r2unet import R2U_Net -import json -from .utils import dice_coefficient -from .data_loader import LungDataLoader -plt.switch_backend('agg') - -class Discriminator(nn.Module): - def __init__(self,in_ch, out_ch): - super(Discriminator, self).__init__() - self.main = nn.Sequential( - # input is (nc) x 64 x 64 - nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - # state size. (64) x 32 x 32 - nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 2), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 2), - nn.LeakyReLU(0.2, inplace=True), - # state size. (64*2) x 16 x 16 - nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 4), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 4), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(2), - # state size. (64*4) x 8 x 8 - nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), - nn.LeakyReLU(0.2, inplace=True), - nn.Sigmoid() - ) - def forward(self, input): - output = self.main(input) - return output.view(-1, 2) #.squeeze(1) - -def ch_shuffle(x): - shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) - shuffIdx2 = 1-shuffIdx1 - d_in = torch.Tensor(x.size()).cuda() - d_in[:,shuffIdx1] = x[:,0] - d_in[:,shuffIdx2] = x[:,1] - shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) - return d_in, shuffLabel - -def train_advnetwork(fold_no,savepath,jsonpath,datapath,lung_segpath,network,epochs=35,lrate=1e-4): - """Training function for SUMNet,UNet,R2Unet - - Parameters - ---------- - fold_no: str - Fold number on which training is to be performed - save_path: str - Folder location to save the models and other plots - json_path: str - Folder location at which json files are stored - datapath: str - Folder location of data - lung_segpath: str - Folder location at which lung segmentation files are stored - network: str - Network to be trained - epochs: int, Default: 35 - Number of epochs for training - lrate: int, Default= 1e-4 - Learnig rate - - Returns - ------- - - None - """ - - - fold = 'fold'+str(fold_no) - savePath = savepath+'/'+network+'/'+fold+'/' - if not os.path.isdir(savePath): - os.makedirs(savePath) - - with open(jsonpath+fold+'_pos_neg_eq.json') as f: - json_file = json.load(f) - train_set = json_file['train_set'] - val_set = json_file['valid_set'] - - - trainDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="train_set",img_size=512) - valDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) - trainDataLoader = data.DataLoader(trainDset,batch_size=8,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) - validDataLoader = 
data.DataLoader(valDset,batch_size=8,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) - - if network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) - if network == 'unet': - net = U_Net(img_ch=1,output_ch=2) - if network == 'r2unet': - net = R2U_Net(img_ch=1,output_ch=2) - - - netD2 = Discriminator(in_ch=2,out_ch=2) - - use_gpu = torch.cuda.is_available() - - if use_gpu: - net = net.cuda() - netD2 = netD2.cuda() - - - optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) - optimizerD2 = optim.Adam(netD2.parameters(), lr = 1e-4, weight_decay = 1e-5) - - - criterion = nn.BCEWithLogitsLoss() - criterionD = nn.BCELoss() - - epochs = epochs - trainLoss = [] - validLoss = [] - D2_losses = [] - trainDiceCoeff_lungs = [] - validDiceCoeff_lungs = [] - start = time.time() - - bestValidDice = torch.zeros(1) - bestValidDice_lungs = 0.0 - - for epoch in range(epochs): - epochStart = time.time() - trainRunningLoss = 0 - validRunningLoss = 0 - trainBatches = 0 - validBatches = 0 - trainDice_lungs = 0 - validDice_lungs = 0 - - - net.train(True) - - for data1 in tq(trainDataLoader): - img, mask = data1 - labels = mask - if use_gpu: - inputs = img.cuda() - labels = labels.cuda() - - net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out,dim=1) - - - ############################ - # DISCRIMINATOR 2 TRAINING # - ############################ - - optimizerD2.zero_grad() - # Concatenate real (GT) and fake (segmented) samples along dim 1 - d_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) - # Shuffling aling dim 1: {real,fake} OR {fake,real} - d_in,shuffLabel = ch_shuffle(d_in) - # D2 prediction - confr = netD2(Variable(d_in)).view(d_in.size(0),-1) - # Compute loss - LD2 = criterionD(confr,shuffLabel.float().cuda()) - # Compute gradients - LD2.backward() - # Backpropagate - optimizerD2.step() - # Appending loss for each batch into the list - D2_losses.append(LD2.item()) - optimizerD2.zero_grad() - d2_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) - d2_in, d2_lb = ch_shuffle(d2_in) - conffs2 = netD2(d2_in).view(d2_in.size(0),-1) - LGadv2 = criterionD(conffs2,d2_lb.float().cuda()) # Aversarial loss 2 - - - - BCE_Loss = criterion(net_out[:,1],labels[:,1]) - - net_loss = BCE_Loss - 0.001*LGadv2 - - optimizer.zero_grad() - - net_loss.backward() - - optimizer.step() - - trainRunningLoss += net_loss.item() - - trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - trainDice_lungs += trainDice[0] - - trainBatches += 1 - # if trainBatches>1: - # # break - - trainLoss.append(trainRunningLoss/trainBatches) - trainDiceCoeff_lungs.append(trainDice_lungs/trainBatches) - - print("\n{}][{}]| Net_loss: {:.4f} | BCE_Loss: {:.4f} |adv_loss: {:.4f}" - .format(epoch,epochs,net_loss.item(),BCE_Loss,LGadv2) ) - - with torch.no_grad(): - for data1 in tq(validDataLoader): - - imgs, mask = data1 - labels = mask - if use_gpu: - inputs = imgs.cuda() - labels = labels.cuda() - - net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out.data,dim=1) - - - BCE_Loss = criterion(net_out[:,1],labels[:,1]) - - net_loss = BCE_Loss - - - val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) - validDice_lungs += val_dice[0] - validRunningLoss += net_loss.item() - validBatches += 1 - # if validBatches>1: - # break - - validLoss.append(validRunningLoss/validBatches) - validDiceCoeff_lungs.append(validDice_lungs/validBatches) - - - - if (validDice_lungs.cpu() > bestValidDice_lungs): - bestValidDice_lungs = 
validDice_lungs.cpu() - torch.save(net.state_dict(), savePath+'sumnet_adv_best_lungs.pt') - - plot=plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - if epoch==0: - plt.legend() - plt.savefig(savePath+'LossPlot.png') - plt.close() - epochEnd = time.time()-epochStart - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' - .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) - print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' - .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) - - print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) - trainLoss_np = np.array(trainLoss) - validLoss_np = np.array(validLoss) - trainDiceCoeff_lungs_np = np.array(trainDiceCoeff_lungs) - validDiceCoeff_lungs_np = np.array(validDiceCoeff_lungs) - - - print('Saving losses') - - torch.save(trainLoss_np, savePath+'trainLoss.pt') - torch.save(validLoss_np, savePath+'validLoss.pt') - torch.save(trainDiceCoeff_lungs_np, savePath+'trainDice_lungs.pt') - torch.save(validDiceCoeff_lungs_np, savePath+'validDice_lungs.pt') - - # if epoch>0: - # break - - end = time.time()-start - print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) - - - plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - plt.title('Loss plot') - plt.legend() - plt.savefig(savePath+'trainLossFinal.png') - plt.close() - - - plt.figure() - plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Lungs') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.title('Dice coefficient: Train') - plt.savefig(savePath+'trainDice.png') - plt.close() - - plt.figure() - plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Lungs') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.title('Dice coefficient: Valid') - plt.savefig(savePath+'validDice.png') - plt.close() - - plt.figure() - plt.plot(range(len(trainDiceCoeff_lungs)),trainDiceCoeff_lungs,'-r',label='Train') - plt.plot(range(len(validDiceCoeff_lungs)),validDiceCoeff_lungs,'-g',label='Valid') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Dice coefficient') - plt.savefig(savePath+'Dice_final.png') - plt.close() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py index 5a94fc089b5..a050569492a 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py @@ -4,23 +4,23 @@ from torch.optim import lr_scheduler from torch.utils import data from torch.autograd import Variable -import numpy as np import matplotlib.pyplot as plt import time import os from tqdm import tqdm_notebook as tq from .data_loader import LungPatchDataLoader from .lenet import LeNet +from .utils import plot_graphs -def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): +def lungpatch_classifier(save_path,img_path,lrate=1e-4,epochs=35): """Trains network to classify patches based on the presence of nodule Parameters ---------- - savepath: str + save_path: str Folder location to save the plots and model - imgpath: + img_path: Folder location where patch images are 
stored. lrate: int,Default = 1e-4 Learning rate @@ -33,14 +33,14 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): None """ - trainDset = LungPatchDataLoader(imgpath=imgpath,is_transform=True,split="train_set") - valDset = LungPatchDataLoader(imgpath=imgpath,is_transform=True,split="valid_set") + trainDset = LungPatchDataLoader(img_path=img_path,is_transform=True,split="train_set") + valDset = LungPatchDataLoader(img_path=img_path,is_transform=True,split="valid_set") trainDataLoader = data.DataLoader(trainDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) validDataLoader = data.DataLoader(valDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) - savePath = savepath - if not os.path.isdir(savePath): - os.makedirs(savePath) + save_path = save_path + if not os.path.isdir(save_path): + os.makedirs(save_path) net = LeNet() @@ -139,57 +139,40 @@ def lungpatch_classifier(savepath,imgpath,lrate=1e-4,epochs=35): if validepoch_acc > bestValidAcc: bestValidAcc = validepoch_acc - torch.save(net.state_dict(), savePath+'lenet_best.pt') + torch.save(net.state_dict(), save_path+'lenet_best.pt') scheduler.step(validepoch_loss) - plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - if epoch==0: - plt.legend() - plt.savefig(savePath+'LossPlot.png') - plt.close() + plot_graphs( + train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='LossPlot.png') + epochEnd = time.time()-epochStart print(f'Epoch: {epoch+1}/{epochs} | Train Loss: {trainepoch_loss} | Valid Loss: {validepoch_loss}') print('Accuracy | Train_acc {trainepoch_acc} | Valid_acc {validepoch_acc} |') print(f'Time: {epochEnd//60}m {epochEnd%60}s') - trainLoss_np = np.array(trainLoss) - validLoss_np = np.array(validLoss) - trainAcc_np = np.array(trainAcc) - validAcc_np = np.array(validAcc) + print(f'Saving losses') - torch.save(trainLoss_np, savePath+'trainLoss.pt') - torch.save(validLoss_np, savePath+'validLoss.pt') - torch.save(trainAcc_np, savePath+'train_acc.pt') - torch.save(validAcc_np, savePath+'valid_acc.pt') + torch.save(trainLoss, save_path+'trainLoss.pt') + torch.save(validLoss, save_path+'validLoss.pt') + torch.save(trainAcc, save_path+'train_acc.pt') + torch.save(validAcc, save_path+'valid_acc.pt') # if epoch>1: # break end = time.time()-start print(f'Training completed in: {end//60}m {end%60}s') - plt.figure() - plt.plot(range(len(trainLoss)),trainLoss,'-r',label='Train') - plt.plot(range(len(validLoss)),validLoss,'-g',label='Valid') - plt.xlabel('Epochs') - plt.ylabel('Loss') - plt.title('Loss plot') - plt.legend() - plt.savefig(savePath+'trainLossFinal.png') - plt.close() - - plt.figure() - plt.plot(range(len(trainAcc)),trainAcc,'-r',label='Train') - plt.plot(range(len(validAcc)),validAcc,'-g',label='Valid') - plt.legend() - plt.xlabel('Epochs') - plt.ylabel('Accuracy') - plt.title('Accuracy Plot') - plt.savefig(savePath+'acc_plot.png') - plt.close() + plot_graphs( + train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='trainLossFinal.png') + + plot_graphs( + train_values=trainAcc, valid_values=validAcc, + save_path=save_path, x_label='Epochs', y_label='Accuracy', + plot_title='Accuracy Plot', save_name='acc_plot.png') diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index f1a8487ea48..d284e92376c 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -1,4 +1,12 @@ import torch +import matplotlib.pyplot as plt +import os +from .sumnet_bn_vgg import SUMNet +from .r2unet import U_Net, R2U_Net +from .lenet import LeNet + + + def dice_coefficient(pred1, target): smooth = 1e-15 @@ -22,4 +30,34 @@ def dice_coefficient(pred1, target): score_1 = intersection_1/union_1 return [score_1.mean()] + + +def load_model(network): + + if network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + elif network == 'r2unet': + net = R2U_Net(img_ch=1,output_ch=2) + elif network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + else: + net = LeNet() + return net + + +def plot_graphs( + train_values, valid_values, + save_path, x_label, y_label, + plot_title, save_name): + + plt.figure() + plt.plot(range(len(train_values)),train_values,'-r',label='Train') + plt.plot(range(len(valid_values)),valid_values,'-g',label='Valid') + plt.xlabel(x_label) + plt.ylabel(y_label) + plt.title(plot_title) + plt.legend() + plt.savefig(os.path.join(save_path, save_name)) + plt.close() + \ No newline at end of file From efd425f10d74db49a85c5b0b57ae65f2249cbe25 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Wed, 11 Jan 2023 03:30:01 +0530 Subject: [PATCH 05/47] Error in ONNX to IR conversion for sumnet --- .../configs/stage1_config.json | 7 +- .../configs/stage2_config.json | 13 +- .../lung_nodule_detection/requirements.txt | 7 +- .../lung_nodule_detection/src/export.py | 28 +- .../lung_nodule_detection/src/inference.py | 39 +- .../lung_nodule_detection/src/prepare_data.py | 9 +- .../lung_nodule_detection/src/scrap.py | 5 + .../src/train_network.py | 57 +- .../src/utils/data_loader.py | 1 - .../src/utils/data_prep/__init__.py | 0 .../src/utils/{ => data_prep}/create_folds.py | 12 +- .../utils/{ => data_prep}/generate_patches.py | 5 - .../utils/{ => data_prep}/generate_slices.py | 0 .../src/utils/{ => data_prep}/visualize.py | 0 .../src/utils/discriminator.py | 45 -- .../src/utils/exporter.py | 11 +- .../src/utils/get_config.py | 2 +- .../{infer_lung_seg.py => infer_stage1.py} | 12 +- ...er_patch_classifier.py => infer_stage2.py} | 21 +- .../lung_nodule_detection/src/utils/lenet.py | 32 - .../src/utils/max_unpool_2d.py | 17 + .../lung_nodule_detection/src/utils/models.py | 343 ++++++++++ .../.github/FUNDING.yml | 13 + .../.github/workflows/main.yml | 208 +++++++ .../src/utils/openvino_pytorch_layers/LICENSE | 201 ++++++ .../utils/openvino_pytorch_layers/README.md | 41 ++ .../utils/openvino_pytorch_layers/__init__.py | 0 .../utils/openvino_pytorch_layers/compare.py | 39 ++ .../examples/calculate_grid/calculate_grid.py | 24 + .../examples/calculate_grid/export_model.py | 40 ++ .../examples/complex_mul/complex_mul.py | 22 + .../examples/complex_mul/export_model.py | 43 ++ .../deformable_conv/deformable_conv.py | 51 ++ .../examples/deformable_conv/export_model.py | 111 ++++ .../examples/fft/export_model.py | 45 ++ .../examples/fft/fft.py | 73 +++ .../examples/grid_sample/export_model.py | 50 ++ .../examples/grid_sample/grid_sample.py | 12 + .../examples/sparse_conv/export_model.py | 69 ++ .../examples/sparse_conv/sparse_conv.py | 55 ++ .../examples/unpool/README.md | 47 ++ .../examples/unpool/export_model.py | 56 ++ .../examples/unpool/unpool.py | 17 + .../mo_extensions/front/onnx/max_unpool.py | 46 ++ .../mo_extensions/ops/MaxPoolGrad.py | 24 + 
.../openvino_extensions/__init__.py | 23 + .../utils/openvino_pytorch_layers/setup.py | 21 + .../tests/requirements.txt | 5 + .../tests/run_tests.py | 147 +++++ .../user_ie_extensions/CMakeLists.txt | 27 + .../user_ie_extensions/calculate_grid.cpp | 79 +++ .../user_ie_extensions/calculate_grid.hpp | 29 + .../user_ie_extensions/complex_mul.cpp | 89 +++ .../user_ie_extensions/complex_mul.hpp | 30 + .../user_ie_extensions/fft.cpp | 372 +++++++++++ .../user_ie_extensions/fft.hpp | 36 ++ .../user_ie_extensions/grid_sample.cpp | 125 ++++ .../user_ie_extensions/grid_sample.hpp | 30 + .../user_ie_extensions/ov_extension.cpp | 38 ++ .../user_ie_extensions/sparse_conv.cpp | 109 ++++ .../user_ie_extensions/sparse_conv.hpp | 33 + .../sparse_conv_transpose.cpp | 109 ++++ .../sparse_conv_transpose.hpp | 33 + .../user_ie_extensions/unpool.cpp | 86 +++ .../user_ie_extensions/unpool.hpp | 41 ++ .../lung_nodule_detection/src/utils/r2unet.py | 423 ------------- .../src/utils/scrap.ipynb | 589 ++++++++++++++++++ .../src/utils/sumnet_bn_vgg.py | 80 --- .../utils/{lung_seg.py => train_stage1.py} | 16 +- .../{patch_classifier.py => train_stage2.py} | 5 +- .../lung_nodule_detection/src/utils/utils.py | 24 +- 71 files changed, 3845 insertions(+), 707 deletions(-) create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/__init__.py rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{ => data_prep}/create_folds.py (99%) rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{ => data_prep}/generate_patches.py (99%) rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{ => data_prep}/generate_slices.py (100%) rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{ => data_prep}/visualize.py (100%) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{infer_lung_seg.py => infer_stage1.py} (98%) rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{infer_patch_classifier.py => infer_stage2.py} (86%) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py create mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp create mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{lung_seg.py => train_stage1.py} (97%) rename misc/pytorch_toolkit/lung_nodule_detection/src/utils/{patch_classifier.py => train_stage2.py} (98%) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json index 684b107da13..91c51e64bc9 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json @@ -16,9 +16,10 @@ "network": "sumnet" }, "export":{ - "checkpoint": "downloads/model_weights/lung_seg_adv_best.pth", - "input_shape":[1, 1, 512, 512], + "checkpoint": "downloads/model_weights/stage1/sumnet_adv_best_lungs.pt", + "input_shape":[2, 1, 512, 512], "model_name_onnx": "lung_seg.onnx", - "model_name":"lung_seg" + "model_name":"lung_seg", + "network": "sumnet" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json index d5a9c5eabe5..3f4e05aef0f 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json @@ -2,16 +2,19 @@ "savepath": "temp_data/stage2/", "imgpath": "test_data/stage2/", "lrate": 1e-4, - "epochs": 5 + "epochs": 5, + "network": "lenet" }, "inference":{ "modelpath":"temp_data/stage2/", - "imgpath":"test_data/stage2/" + "imgpath":"test_data/stage2/", + "network": "lenet" }, "export":{ - "checkpoint": "downloads/model_weights/patch_class_best.pth", + "checkpoint": "downloads/model_weights/stage2/lenet_best.pt", "input_shape":[1, 1, 64, 64], - "model_name_onnx": "patch_class.onnx", - "model_name":"patch_class" + "model_name_onnx": "lenet_best.onnx", + "model_name":"lenet_best", + "network": "lenet" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt index 666be8bf153..ea55fe6e700 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt +++ b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt @@ -1,11 +1,12 @@ -torch -torchvision -torchmetrics +torch==1.5.1 +torchvision==0.6.1 numpy openvino-dev[onnx] onnxruntime +onnx wget tqdm pytest matplotlib natsort +pylidc diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/export.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py index e3e7db2103a..a6a2e7f6235 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/export.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py @@ -1,14 +1,14 @@ -from utils.exporter import Exporter import argparse -from utils.get_config import get_config +from .utils.exporter import Exporter +from .utils.get_config import get_config -def export(args): - export_config = get_config(action='export', phase=args.phase) - exporter = Exporter(export_config, args.optimised) +def export(configs): + export_config = get_config(action='export', stage=configs["stage"]) + exporter = Exporter(export_config, stage=configs["stage"]) - if args.onnx: + if configs["onnx"]: exporter.export_model_onnx() - if args.ir: + if configs["ir"]: exporter.export_model_ir() if __name__ == '__main__': @@ -24,9 +24,15 @@ def export(args): help="Set to True, if you wish to export IR", default=False, action='store_true') - parser.add_argument('-ph', '--phase', type=int, - required=True, default=1, help='Phase') + parser.add_argument('-s', '--stage', type=int, + required=True, default=1, help='Stage') - custom_args = parser.parse_args() + args = parser.parse_args() - export(custom_args) + configs = { + "onnx": args.onnx, + "ir": args.ir, + "stage": args.stage + + } + export(configs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py index 0940e2e5392..227a4efb5d3 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py @@ -1,20 +1,19 @@ -from .utils import infer_lung_seg -from .utils import infer_patch_classifier +from .utils import infer_stage1 +from .utils import infer_stage2 import argparse -def main(args): +def main(config): - if args.lungseg: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - network = args.network - infer_lung_seg.infer_lungseg(foldno,savepath,network,jsonpath) + if config["lungseg"]: + foldno = config["foldno"] + savepath = config["savepath"] + jsonpath = config["jsonpath"] + network = config["network"] + infer_stage1.infer_lungseg(foldno,savepath,network,jsonpath) else: - savepath = args.savepath - imgpath = args.imgpath - infer_patch_classifier.lungpatch_classifier(savepath,imgpath) - + savepath = config["savepath"] + imgpath = config["imgpath"] + infer_stage2.lungpatch_classifier(savepath,imgpath) if __name__ == '__main__': @@ -35,6 +34,16 @@ def main(args): parser.add_argument('--network', help='Network to be trained') - args= parser.parse_args() + args = parser.parse_args() + + configs = { + "lungseg": args.lungseg, + "patchclass": args.patchclass, + "savepath" : args.savepath, + "foldno" : args.foldno, + "jsonpath" : args.jsonpath, + "imgpath" : args.imgpath, + "network" : args.network + } - main(args) + main(configs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py index 92b1250db28..8342b206185 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py @@ -1,8 +1,8 @@ import argparse -from .utils import visualize -from .utils import generate_slices -from .utils import create_folds -from .utils import generate_patches +from .utils.data_prep import visualize +from .utils.data_prep import generate_slices +from .utils.data_prep import create_folds +from 
.utils.data_prep import generate_patches def main(args): @@ -45,7 +45,6 @@ def main(args): generate_patches.generate_patchlist(jsonpath,patchtype,foldno) generate_patches.generate_negative_patch(jsonpath,foldno,data_path,lungsegpath,savepath,category) - elif args.visualize: seriesuid = args.seriesuid slice_num = args.sliceno diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py b/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py new file mode 100644 index 00000000000..79f315d6e10 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py @@ -0,0 +1,5 @@ +import torch +print(torch.__version__) + +import torchvision +print(torchvision.__version__) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py index 853d6faab9e..588a2ea1eae 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py @@ -1,33 +1,32 @@ -from utils import lung_seg -from utils import lung_seg_adv +from utils import train_stage1 from utils import patch_classifier import argparse -def main(args): +def main(config): - if args.lungseg or args.lungsegadv: - foldno = args.foldno - savepath = args.savepath - jsonpath = args.jsonpath - datapath = args.datapath - lungsegpath = args.lungmask - network = args.network - if args.epochs: - if args.lungsegadv: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs,adv=True) + if config["lungseg"] or config["lungsegadv"]: + foldno = config["foldno"] + savepath = config["savepath"] + jsonpath = config["jsonpath"] + datapath = config["datapath"] + lungsegpath = config["lungmask"] + network = config["network"] + if config["epochs"]: + if config["lungsegadv"]: + train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"],adv=True) else: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs) + train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"]) else: - if args.lungsegadv: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,args.epochs,adv=True) + if config["lungsegadv"]: + train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"],adv=True) else: - lung_seg.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) + train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) else: - savepath = args.savepath - imgpath = args.datapath - if args.epochs: - patch_classifier.lungpatch_classifier(savepath,imgpath,args.epochs) + savepath = config["savepath"] + imgpath = config["datapath"] + if config["epochs"]: + patch_classifier.lungpatch_classifier(savepath,imgpath,config["epochs"]) else: patch_classifier.lungpatch_classifier(savepath,imgpath) @@ -53,7 +52,6 @@ def main(args): help='Folder location where img and masks are stored') parser.add_argument('--lungmask', help='Folder location where lung masks are stored') - parser.add_argument('--network', help='Network to be trained') parser.add_argument('--epochs', @@ -62,4 +60,17 @@ def main(args): args= parser.parse_args() - main(args) + configs = { + "lungseg": args.lungseg, + "lungsegadv": args.lungsegadv, + "patchclass": args.patchclass, + "savepath": args.savepath, + "foldno": args.foldno, + "jsonpath": args.jsonpath, + "datapath": args.datapath, + 
"lungmask": args.lungmask, + "network": args.network, + "epochs": args.epochs + } + + main(configs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index 463e1883ae3..27f5c194390 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -5,7 +5,6 @@ from PIL import Image import numpy as np - class LungDataLoader(data.Dataset): """Class represents the dataloader for Lung segmentation task diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py similarity index 99% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py index aebb0beb01d..fb02a24d26e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/create_folds.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py @@ -1,13 +1,11 @@ -#!/usr/bin/env python -# coding: utf-8 +import json +import os +from collections import defaultdict import numpy as np -import os from natsort import natsorted from tqdm import tqdm as tq -from collections import defaultdict -import json -import argparse + def positive_negative_classifier(data_path,save_path): """Classifies slices as positive and negative slices. @@ -284,8 +282,6 @@ def create_balanced_dataset(save_path,data_path,additional=False): if additional == True: fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) - - print('Balanced dataset generated and saved') return fold_npy diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py similarity index 99% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py index 44b20c20afd..2795f1e1f1e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_patches.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py @@ -1,11 +1,6 @@ -#!/usr/bin/env python -# coding: utf-8 - -import pickle import numpy as np import os import cv2 -from skimage.util.shape import view_as_windows import json from tqdm import tqdm as tq import matplotlib.pyplot as plt diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/generate_slices.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/visualize.py similarity index 100% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/visualize.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/visualize.py 
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py deleted file mode 100644 index bbc71b1ca91..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/discriminator.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -from torch import nn - -class Discriminator(nn.Module): - def __init__(self,in_ch, out_ch): - super(Discriminator, self).__init__() - self.main = nn.Sequential( - # input is (nc) x 64 x 64 - nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - # state size. (64) x 32 x 32 - nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 2), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 2), - nn.LeakyReLU(0.2, inplace=True), - # state size. (64*2) x 16 x 16 - nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 4), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(3), - nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), - nn.BatchNorm2d(64 * 4), - nn.LeakyReLU(0.2, inplace=True), - nn.MaxPool2d(2), - # state size. (64*4) x 8 x 8 - nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), - nn.LeakyReLU(0.2, inplace=True), - nn.Sigmoid() - ) - def forward(self, input): - output = self.main(input) - return output.view(-1, 2) #.squeeze(1) - -def ch_shuffle(x): - shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) - shuffIdx2 = 1-shuffIdx1 - d_in = torch.Tensor(x.size()).cuda() - d_in[:,shuffIdx1] = x[:,0] - d_in[:,shuffIdx2] = x[:,1] - shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) - return d_in, shuffLabel diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py index 4c0b334cdc5..3e7c429d525 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py @@ -8,9 +8,7 @@ def __init__(self, config, stage): self.config = config self.checkpoint = config.get('checkpoint') self.stage = stage - self.model = load_model(network=config["network"]) - self.model.eval() load_checkpoint(self.model, self.checkpoint) @@ -19,11 +17,14 @@ def export_model_ir(self): os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) input_shape = self.config.get('input_shape') output_dir = os.path.split(self.checkpoint)[0] + openvino_extension_path = '/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions' export_command = f"""mo \ --framework onnx \ --input_model {input_model} \ --input_shape "{input_shape}" \ - --output_dir {output_dir}""" + --output_dir {output_dir}\ + --log_level=DEBUG\ + --extension {openvino_extension_path}""" if self.config.get('verbose_export'): print(export_command) @@ -40,8 +41,6 @@ def export_model_onnx(self): dummy_input = torch.randn(1, 1, 512, 512) torch.onnx.export(self.model, dummy_input, res_path, - opset_version=11, do_constant_folding=True, input_names=['input'], output_names=['output'], - dynamic_axes={'input': {0: 'batch_size'}, - 'output': {0: 'batch_size'}}, + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, verbose=False) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py index 
063b5fe1148..9beaa2176f5 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/get_config.py @@ -1,7 +1,7 @@ import os import json -def get_config(action, stage=1, config_path=""): +def get_config(action, stage=1, config_path="configs/"): root_path = os.path.dirname(os.path.dirname( os.path.dirname(os.path.realpath(__file__)))) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py similarity index 98% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py index f31fa7a284d..00cd7ba42bd 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_lung_seg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py @@ -1,14 +1,13 @@ -import numpy as np import torch -from tqdm import tqdm as tq from torch.utils import data -import os import torch.nn.functional as F from torch.autograd import Variable +import os +import numpy as np +from tqdm import tqdm as tq import matplotlib.pyplot as plt import json -from .sumnet_bn_vgg import SUMNet -from .r2unet import R2U_Net, U_Net +from .models import SUMNet, U_Net, R2U_Net from .data_loader import LungDataLoader from .utils import dice_coefficient plt.switch_backend('agg') @@ -108,7 +107,6 @@ def infer_lungseg(fold_no,save_path,network,jsonpath): dice = np.mean(dice_list) print("Result:",fold,dice) - #Plots distribution of min values per volume plt.figure() plt.title('Distribution of Dice values') @@ -119,6 +117,8 @@ def infer_lungseg(fold_no,save_path,network,jsonpath): # plt.show() plt.close() + return dice + def visualise_seg(loadpath): diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py similarity index 86% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py index 951aaae99b2..8c752e794b1 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py @@ -1,29 +1,14 @@ -#!/usr/bin/env python -# coding: utf-8 - - -from __future__ import print_function, division - import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler from torch.utils import data -from torchvision import transforms -import torchvision -from torchvision import datasets, models, transforms -import torch.nn.functional as F from torch.autograd import Variable import numpy as np -import matplotlib.pyplot as plt -import time -import os -import copy -from PIL import Image -from tqdm import tqdm_notebook as tq +from tqdm import tqdm as tq from sklearn.metrics import confusion_matrix from .data_loader import LungPatchDataLoader -from .lenet import LeNet +from .models import LeNet def lungpatch_classifier(modelpath,imgpath): @@ -88,3 +73,5 @@ def lungpatch_classifier(modelpath,imgpath): sensitivity = tp/(tp+fn) print('Specificity :',specificity) print('Sensitivity :',sensitivity) + + return testepoch_acc, specificity, sensitivity diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py deleted file mode 100644 index 
5720332202d..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lenet.py +++ /dev/null @@ -1,32 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -class LeNet(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv2d(1, 6, kernel_size=5) - self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv2 = nn.Conv2d(6, 16, kernel_size=5) - self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv3 = nn.Conv2d(16, 16, kernel_size=5) - self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2) - self.conv3_drop = nn.Dropout2d() - self.fc1 = nn.Linear(256, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 2) - - def forward(self, x): - x = F.relu(self.conv1(x)) - x = self.pool1(x) - x = F.relu(self.conv2(x)) - x = self.pool2(x) - x = F.relu(self.conv3_drop(self.conv3(x))) - x = self.pool3(x) - x = x.view(-1, 256) - x = F.relu(self.fc1(x)) - x = F.dropout(x, training=self.training) - x = F.relu(self.fc2(x)) - x = F.dropout(x, training=self.training) - x = self.fc3(x) - return torch.sigmoid(x) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py index 6a2ef4fb5fe..96a759bad93 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py @@ -9,6 +9,7 @@ from torch.autograd import Function from torch.nn.modules.pooling import _MaxUnpoolNd from torch.nn.modules.utils import _pair +from torch import nn class MaxUnpool2dop(Function): """We warp the `torch.nn.functional.max_unpool2d` @@ -127,3 +128,19 @@ def forward(self, input, indices, output_size=None): """ return MaxUnpool2dop.apply(input, indices, self.kernel_size, self.stride, self.padding, output_size) + + +class Unpool2d(torch.autograd.Function): + @staticmethod + def symbolic(g, x, indices, output_size=None): + if output_size: + return g.op('Unpooling', x, indices, output_size) + else: + return g.op('Unpooling', x, indices) + + @staticmethod + def forward(self, x, indices, output_size=None): + if not output_size is None: + return nn.MaxUnpool2d(2, stride=2)(x, indices, output_size=output_size.size()) + else: + return nn.MaxUnpool2d(2, stride=2)(x, indices) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py new file mode 100644 index 00000000000..81ad49dc2f5 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py @@ -0,0 +1,343 @@ +import torch +from torch import nn +import torch.nn.functional as F +from torchvision import models +from .max_unpool_2d import Unpool2d as MaxUnpool2d + + +class SUMNet(nn.Module): + def __init__(self,in_ch,out_ch): + super().__init__() + + self.encoder = models.vgg11_bn(pretrained = True).features + self.preconv = nn.Conv2d(in_ch, 3, 1) + self.conv1 = self.encoder[0] + self.bn1 = self.encoder[1] + self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv2 = self.encoder[4] + self.bn2 = self.encoder[5] + self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv3a = self.encoder[8] + self.bn3 = self.encoder[9] + self.conv3b = self.encoder[11] + self.bn4 = self.encoder[12] + self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) + self.conv4a = self.encoder[15] + self.bn5 = self.encoder[16] + self.conv4b = self.encoder[18] + self.bn6 = self.encoder[19] + self.pool4 = nn.MaxPool2d(2, 2, 
return_indices = True) + self.conv5a = self.encoder[22] + self.bn7 = self.encoder[23] + self.conv5b = self.encoder[25] + self.bn8 = self.encoder[26] + self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) + + self.unpool5 = MaxUnpool2d() + self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) + self.unpool4 = MaxUnpool2d() + self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) + self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) + self.unpool3 = MaxUnpool2d() + self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) + self.donv3a = nn.Conv2d(256,128, 3, padding = 1) + self.unpool2 = MaxUnpool2d() + self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) + self.unpool1 = MaxUnpool2d() + self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) + self.output = nn.Conv2d(32, out_ch, 1) + + def forward(self, x): + preconv = F.relu(self.preconv(x), inplace = True) + conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) + pool1, idxs1 = self.pool1(conv1) + conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) + pool2, idxs2 = self.pool2(conv2) + conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) + conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) + pool3, idxs3 = self.pool3(conv3b) + conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) + conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) + pool4, idxs4 = self.pool4(conv4b) + conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) + conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) + pool5, idxs5 = self.pool5(conv5b) + + unpool5 = torch.cat([self.unpool5.apply(pool5, idxs5), conv5b], 1) + donv5b = F.relu(self.donv5b(unpool5), inplace = True) + donv5a = F.relu(self.donv5a(donv5b), inplace = True) + unpool4 = torch.cat([self.unpool4.apply(donv5a, idxs4), conv4b], 1) + donv4b = F.relu(self.donv4b(unpool4), inplace = True) + donv4a = F.relu(self.donv4a(donv4b), inplace = True) + unpool3 = torch.cat([self.unpool3.apply(donv4a, idxs3), conv3b], 1) + donv3b = F.relu(self.donv3b(unpool3), inplace = True) + donv3a = F.relu(self.donv3a(donv3b)) + unpool2 = torch.cat([self.unpool2.apply(donv3a, idxs2), conv2], 1) + donv2 = F.relu(self.donv2(unpool2), inplace = True) + unpool1 = torch.cat([self.unpool1.apply(donv2, idxs1), conv1], 1) + donv1 = F.relu(self.donv1(unpool1), inplace = True) + output = self.output(donv1) + return output + +class LeNet(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 6, kernel_size=5) + self.pool1 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv2 = nn.Conv2d(6, 16, kernel_size=5) + self.pool2 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3 = nn.Conv2d(16, 16, kernel_size=5) + self.pool3 = nn.MaxPool2d(kernel_size=2,stride=2) + self.conv3_drop = nn.Dropout2d() + self.fc1 = nn.Linear(256, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 2) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = self.pool1(x) + x = F.relu(self.conv2(x)) + x = self.pool2(x) + x = F.relu(self.conv3_drop(self.conv3(x))) + x = self.pool3(x) + x = x.view(-1, 256) + x = F.relu(self.fc1(x)) + x = F.dropout(x, training=self.training) + x = F.relu(self.fc2(x)) + x = F.dropout(x, training=self.training) + x = self.fc3(x) + return torch.sigmoid(x) + +class conv_block(nn.Module): + def __init__(self,ch_in,ch_out): + super().__init__() + self.conv = nn.Sequential( + nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True), + nn.Conv2d(ch_out, 
ch_out, kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + + def forward(self,x): + x = self.conv(x) + return x + +class up_conv(nn.Module): + def __init__(self,ch_in,ch_out): + super().__init__() + self.up = nn.Sequential( + nn.Upsample(scale_factor=2), + nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + x = self.up(x) + return x + +class Recurrent_block(nn.Module): + def __init__(self,ch_out,t=2): + super().__init__() + self.t = t + self.ch_out = ch_out + self.conv = nn.Sequential( + nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1,bias=True), + nn.BatchNorm2d(ch_out), + nn.ReLU(inplace=True) + ) + + def forward(self,x): + for i in range(self.t): + + if i==0: + x1 = self.conv(x) + + x1 = self.conv(x+x1) + return x1 + +class RRCNN_block(nn.Module): + def __init__(self,ch_in,ch_out,t=2): + super().__init__() + self.RCNN = nn.Sequential( + Recurrent_block(ch_out,t=t), + Recurrent_block(ch_out,t=t) + ) + self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0) + + def forward(self,x): + x = self.Conv_1x1(x) + x1 = self.RCNN(x) + return x+x1 + +class U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1): + super().__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) + self.Conv2 = conv_block(ch_in=64,ch_out=128) + self.Conv3 = conv_block(ch_in=128,ch_out=256) + self.Conv4 = conv_block(ch_in=256,ch_out=512) + self.Conv5 = conv_block(ch_in=512,ch_out=1024) + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_conv4 = conv_block(ch_in=512, ch_out=256) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Up_conv3 = conv_block(ch_in=256, ch_out=128) + + self.Up2 = up_conv(ch_in=128,ch_out=64) + self.Up_conv2 = conv_block(ch_in=128, ch_out=64) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.Conv1(x) + + x2 = self.Maxpool(x1) + x2 = self.Conv2(x2) + + x3 = self.Maxpool(x2) + x3 = self.Conv3(x3) + + x4 = self.Maxpool(x3) + x4 = self.Conv4(x4) + + x5 = self.Maxpool(x4) + x5 = self.Conv5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + + d5 = self.Up_conv5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_conv4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_conv3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_conv2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + +class R2U_Net(nn.Module): + def __init__(self,img_ch=3,output_ch=1,t=2): + super().__init__() + + self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) + self.Upsample = nn.Upsample(scale_factor=2) + + self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) + + self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) + + self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) + + self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) + + self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) + + + self.Up5 = up_conv(ch_in=1024,ch_out=512) + self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) + + self.Up4 = up_conv(ch_in=512,ch_out=256) + self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) + + self.Up3 = up_conv(ch_in=256,ch_out=128) + self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) + + self.Up2 = 
up_conv(ch_in=128,ch_out=64) + self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) + + self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) + + + def forward(self,x): + # encoding path + x1 = self.RRCNN1(x) + + x2 = self.Maxpool(x1) + x2 = self.RRCNN2(x2) + + x3 = self.Maxpool(x2) + x3 = self.RRCNN3(x3) + + x4 = self.Maxpool(x3) + x4 = self.RRCNN4(x4) + + x5 = self.Maxpool(x4) + x5 = self.RRCNN5(x5) + + # decoding + concat path + d5 = self.Up5(x5) + d5 = torch.cat((x4,d5),dim=1) + d5 = self.Up_RRCNN5(d5) + + d4 = self.Up4(d5) + d4 = torch.cat((x3,d4),dim=1) + d4 = self.Up_RRCNN4(d4) + + d3 = self.Up3(d4) + d3 = torch.cat((x2,d3),dim=1) + d3 = self.Up_RRCNN3(d3) + + d2 = self.Up2(d3) + d2 = torch.cat((x1,d2),dim=1) + d2 = self.Up_RRCNN2(d2) + + d1 = self.Conv_1x1(d2) + + return d1 + +class Discriminator(nn.Module): + def __init__(self,in_ch, out_ch): + super(Discriminator, self).__init__() + self.main = nn.Sequential( + # input is (nc) x 64 x 64 + nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + # state size. (64) x 32 x 32 + nn.Conv2d(64, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64*2, 64 * 2, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 2), + nn.LeakyReLU(0.2, inplace=True), + # state size. (64*2) x 16 x 16 + nn.Conv2d(64 * 2, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(3), + nn.Conv2d(64 * 4, 64 * 4, 3, 1, 0, bias=False), + nn.BatchNorm2d(64 * 4), + nn.LeakyReLU(0.2, inplace=True), + nn.MaxPool2d(2), + # state size. (64*4) x 8 x 8 + nn.Conv2d(64 * 4, out_ch, 7, 1, 0, bias=False), + nn.LeakyReLU(0.2, inplace=True), + nn.Sigmoid() + ) + def forward(self, input): + output = self.main(input) + return output.view(-1, 2) #.squeeze(1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml new file mode 100644 index 00000000000..7a26b0e5570 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: https://www.buymeacoffee.com/dkurt diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml new file mode 100644 index 00000000000..a089fe88281 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml @@ -0,0 +1,208 @@ +# This is a basic workflow to help you get started with Actions + +name: CI + +# Controls 
when the action will run. Triggers the workflow on push or pull request +# events but only for the master branch +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + OPENVINO_VERSION: 2022.1.0 + OPENCV_VERSION: 4.5.5 + VERSION: 2022.1.0.dev3 + DIST_WIN: https://registrationcenter-download.intel.com/akdlm/irc_nas/18618/w_openvino_toolkit_p_2022.1.0.643_offline.exe + DIST_MAC: https://registrationcenter-download.intel.com/akdlm/irc_nas/18616/m_openvino_toolkit_p_2022.1.0.643_offline.dmg + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + build_lnx: + runs-on: ubuntu-18.04 + container: + centos:centos8.4.2105 + + steps: + - uses: actions/checkout@v2 + + - name: Install dependencies + run: | + dnf -y --disablerepo '*' --enablerepo=extras swap centos-linux-repos centos-stream-repos + dnf -y distro-sync + yum group install -y "Development Tools" --nobest + yum install -y python3 wget cmake + python3 -m pip install --upgrade pip + + - name: Install OpenVINO + run: | + tee > /tmp/openvino-2022.repo << EOF + [OpenVINO] + name=Intel(R) Distribution of OpenVINO 2022 + baseurl=https://yum.repos.intel.com/openvino/2022 + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + EOF + mv /tmp/openvino-2022.repo /etc/yum.repos.d + yum repolist | grep -i openvino + yum install -y yum-utils openvino-2022.1.0 + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + make -j$(nproc --all) install + + - name: Build CPU extensions + run: | + source /opt/intel/openvino_2022/setupvars.sh + cd user_ie_extensions + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Release + make -j$(nproc --all) + + - name: Build wheel + run: | + python3 -m pip install wheel + EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.so python3 setup.py build bdist_wheel + mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-manylinux2014_x86_64.whl + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_lnx" + path: "*.whl" + + build_win: + runs-on: windows-latest + + steps: + - uses: actions/checkout@v2 + + - name: Install OpenVINO + run: | + Invoke-WebRequest ${{env.DIST_WIN}} -OutFile openvino.exe + Start-Process -Wait -FilePath "openvino.exe" -ArgumentList "-s -a --silent --eula accept" + shell: pwsh + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ..\\opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + cmake --build . --config Release -j 2 + cmake --install . --prefix "C:\opencv_install" + shell: cmd + + - name: Build CPU extensions + run: | + call "C:\Program Files (x86)\Intel\openvino_2022\setupvars.bat" + cd user_ie_extensions + mkdir build && cd build + cmake .. -DOpenCV_DIR="C:\opencv_install" + cmake --build . 
--config Release -j 2 + shell: cmd + + - name: Build wheel + run: | + python3 -m pip install --upgrade pip + python3 -m pip install wheel + ls user_ie_extensions\build\Release + set EXT_LIB=user_ie_extensions\\build\\Release\\user_cpu_extension.dll + python3 setup.py build bdist_wheel + move dist\\*.whl openvino_extensions-${{env.VERSION}}-py3-none-win_amd64.whl + shell: cmd + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_win" + path: "*.whl" + + build_mac: + runs-on: macos-10.15 + + steps: + - uses: actions/checkout@v2 + + - name: Install OpenVINO + run: | + curl ${{env.DIST_MAC}} -o openvino.dmg + hdiutil attach openvino.dmg + cd /Volumes/m_openvino_toolkit_p_2022.1.0.643_offline/bootstrapper.app/Contents/MacOS/ + sudo ./install.sh -s --eula=accept + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + make -j$(nproc --all) install + + - name: Build CPU extensions + run: | + source /opt/intel/openvino_2022/setupvars.sh + cd user_ie_extensions + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Release + make -j$(nproc --all) + + - name: Build wheel + run: | + python3 -m pip install --upgrade pip + python3 -m pip install wheel + ls user_ie_extensions/build/ + EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.dylib python3 setup.py build bdist_wheel + mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-macosx_10_15_x86_64.whl + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_mac" + path: "*.whl" + + test_lnx: + needs: build_lnx + runs-on: ubuntu-18.04 + + steps: + - uses: actions/checkout@v2 + + - uses: actions/download-artifact@v2 + with: + name: wheel_lnx + + - name: Install dependencies + run: | + sudo apt-get install -y python3-setuptools libopencv-dev + python3 -m pip install --upgrade pip + python3 -m pip install -r tests/requirements.txt + python3 -m pip install -U protobuf + python3 -m pip install openvino-dev[onnx]==${{env.OPENVINO_VERSION}} + + # Also, remove "openvino_extensions" folder to avoid import confusion + - name: Install CPU extensions + run: | + rm -r openvino_extensions + python3 -m pip install *.whl + + - name: Test + run: | + python3 -m pytest tests/run_tests.py + + publish: + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} + needs: [test_lnx, build_win, build_mac] + runs-on: ubuntu-18.04 + steps: + - uses: actions/download-artifact@v2 + + - name: Publish + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + run: | + python3 -m pip install --upgrade pip + python3 -m pip install twine + python3 -m twine upload wheel*/*.whl --skip-existing diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md new file mode 100644 index 00000000000..b7cc66c5f79 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md @@ -0,0 +1,41 @@ +Repository with guides to enable some layers from PyTorch in Intel OpenVINO: + +[![CI](https://github.com/dkurt/openvino_pytorch_layers/workflows/CI/badge.svg?branch=master)](https://github.com/dkurt/openvino_pytorch_layers/actions?query=branch%3Amaster) + +* [nn.MaxUnpool2d](examples/unpool) +* [torch.fft](examples/fft) +* [nn.functional.grid_sample](https://github.com/dkurt/openvino_pytorch_layers/tree/master/examples/grid_sample) +* [torchvision.ops.DeformConv2d](examples/deformable_conv) +* [SparseConv](examples/sparse_conv) from [Open3D](https://github.com/isl-org/Open3D) + + +## OpenVINO Model Optimizer extension + +To create OpenVINO IR, use extra `--extension` flag to specify a path to Model Optimizer extensions that perform graph transformations and register custom layers. + +```bash +mo --input_model model.onnx --extension openvino_pytorch_layers/mo_extensions +``` + +## Custom CPU extensions + +You also need to build CPU extensions library which actually has C++ layers implementations: +```bash +source /opt/intel/openvino_2022/setupvars.sh + +cd user_ie_extensions +mkdir build && cd build +cmake .. -DCMAKE_BUILD_TYPE=Release && make -j$(nproc --all) +``` + +Add compiled extensions library to your project: + +```python +from openvino.runtime import Core + +core = Core() +core.add_extension('user_ie_extensions/build/libuser_cpu_extension.so') + +model = ie.read_model('model.xml') +compiled_model = ie.compile_model(model, 'CPU') +``` diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py new file mode 100644 index 00000000000..13fe7c81232 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py @@ -0,0 +1,39 @@ +# NOTE: import order is critical for now: extensions, openvino and only then numpy +from openvino_extensions import get_extensions_path +from openvino.inference_engine import IECore + +import argparse +import numpy as np + +parser = argparse.ArgumentParser(description='Compare OpenVINO implementation with reference data') +parser.add_argument('--num_inputs', type=int, default=1) +parser.add_argument('-m', '--model', default="model.xml") +parser.add_argument('-d', '--device', default="CPU") +args = parser.parse_args() + +inputs = {} +shapes = {} +for i in range(args.num_inputs): + suffix = '{}'.format(i if i > 0 else '') + data = np.load('inp' + suffix + '.npy') + inputs['input' + suffix] = data + shapes['input' + suffix] = data.shape + +ref = np.load('ref.npy') + +ie = IECore() +ie.add_extension(get_extensions_path(), 'CPU') +ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU') + +net = ie.read_network(args.model) +net.reshape(shapes) +exec_net = ie.load_network(net, args.device) + +out = exec_net.infer(inputs) +out = next(iter(out.values())) + +maxdiff = 
np.max(np.abs(ref - out)) +print('Reference range: [{}, {}]'.format(np.min(ref), np.max(ref))) +print('Maximal difference:', maxdiff) +if maxdiff > 1e-5: + exit(1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py new file mode 100644 index 00000000000..a6c55adcf97 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py @@ -0,0 +1,24 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class CalculateGrid(torch.autograd.Function): + @staticmethod + def symbolic(g, in_positions): + return g.op("CalculateGrid", in_positions) + + @staticmethod + def forward(self, in_positions): + filter = torch.Tensor([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0], + [0, -1, -1], [0, -1, 0], [0, 0, -1], + [0, 0, 0]]).to(in_positions.device) + + out_pos = in_positions.long().repeat(1, filter.shape[0]).reshape(-1, 3) + filter = filter.repeat(in_positions.shape[0], 1) + + out_pos = out_pos + filter + out_pos = out_pos[out_pos.min(1).values >= 0] + out_pos = out_pos[(~((out_pos.long() % 2).bool()).any(1))] + out_pos = torch.unique(out_pos, dim=0) + + return out_pos + 0.5 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py new file mode 100644 index 00000000000..e7a3fd9d40e --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py @@ -0,0 +1,40 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .calculate_grid import CalculateGrid + + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.calculate_grid = CalculateGrid() + + def forward(self, x): + return self.calculate_grid.apply(x) + + +def export(num_points, max_grid_extent): + # Generate a list of unique positions and add a mantissa + np.random.seed(32) + torch.manual_seed(11) + + inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) + inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) + + model = MyModel() + with torch.no_grad(): + torch.onnx.export(model, (inp_pos), 'model.onnx', + input_names=['input'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp_pos).detach().numpy() + + # Pad values with espetial end line (-1, 0, 0) and zeros + ref = np.concatenate((ref, [[-1, 0, 0]])) + ref = np.pad(ref, ((0, inp_pos.shape[0] - ref.shape[0]), (0, 0))) + + np.save('inp', inp_pos.detach().numpy()) + np.save('ref', ref) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py new file mode 100644 index 00000000000..d0a854b6c50 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn + +class ComplexMul(torch.autograd.Function): + @staticmethod + def symbolic(g, 
input_tensor, other_tensor, is_conj = True): + return g.op("ComplexMultiplication", input_tensor, other_tensor, is_conj_i=int(is_conj)) + + @staticmethod + def forward(self, input_tensor, other_tensor): + complex_index = -1 + real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1] + imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0] + + multiplication = torch.cat( + [ + real_part.unsqueeze(dim=complex_index), + imaginary_part.unsqueeze(dim=complex_index), + ], + dim=complex_index, + ) + return multiplication diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py new file mode 100644 index 00000000000..564d93342e6 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py @@ -0,0 +1,43 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .complex_mul import ComplexMul + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.complex_mul = ComplexMul() + + def forward(self, x, y): + return self.complex_mul.apply(x, y) + +def export(inp_shape=[3, 2, 4, 8, 2], other_shape=[3, 2, 4, 8, 2]): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel() + inp = Variable(torch.randn(inp_shape)) + inp1 = Variable(torch.randn(other_shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, (inp, inp1), 'model.onnx', + input_names=['input', 'input1'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp, inp1) + np.save('inp', inp.detach().numpy()) + np.save('inp1', inp1.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--inp_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) + parser.add_argument('--other_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) + args = parser.parse_args() + + export(args.inp_shape, args.other_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py new file mode 100644 index 00000000000..fce9fa679ea --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +import torchvision.ops as ops + + +class DeformableConvFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, x, offset): + weight = cls.state_dict()["weight"] + weight = g.op("Constant", value_t=weight) + + return g.op( + "DeformableConv2D", + x, + offset, + weight, + strides_i=(cls.stride, cls.stride), + pads_i=(cls.padding, cls.padding, cls.padding, cls.padding), + dilations_i=(cls.dilation, cls.dilation), + deformable_group_i=cls.groups, + ) + + @staticmethod + def forward(self, cls, x, offset): + y = cls.origin_forward(x, offset) + return y + + +class DeformableConvolution(ops.DeformConv2d): + """ + This is a support class which helps export network with SparseConv 
in ONNX format. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + self.stride = kwargs.get("stride", 1) + self.padding = kwargs.get("padding", 0) + self.dilation = kwargs.get("dilation", 1) + self.groups = kwargs.get("groups", 1) + self.pad_l = nn.ConstantPad2d((1, 1, 1, 1), 0) + + def forward(self, x, offset): + """ + Using paddings is a workaround for 2021.4 release. + """ + x = self.pad_l(x) + offset = self.pad_l(offset) + y = DeformableConvFunc.apply(self, x, offset) + y = y[:, :, 1:-1, 1:-1] + return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py new file mode 100644 index 00000000000..a7630adedc0 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py @@ -0,0 +1,111 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .deformable_conv import DeformableConvolution + +np.random.seed(324) +torch.manual_seed(32) + + +class MyModel(nn.Module): + def __init__( + self, + inplanes, + outplanes, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + bias=False, + deformable_groups=1, + ): + super(MyModel, self).__init__() + self.def_conv = DeformableConvolution( + inplanes, + outplanes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + groups=deformable_groups, + ) + + def forward(self, x, offset): + y = self.def_conv(x, offset) + return y + + +def export( + inplanes, + outplanes, + kernel_size, + stride, + padding, + dilation, + deformable_groups, + inp_shape, + offset_shape, +): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel( + inplanes, + outplanes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + deformable_groups=deformable_groups, + ) + model.eval() + + x = Variable(torch.randn(inp_shape)) + offset = Variable(torch.randn(offset_shape)) + ref = model(x, offset) + + np.save("inp", x.detach().numpy()) + np.save("inp1", offset.detach().numpy()) + np.save("ref", ref.detach().numpy()) + + with torch.no_grad(): + torch.onnx.export( + model, + (x, offset), + "model.onnx", + input_names=["input", "input1"], + output_names=["output"], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH, + opset_version=12, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate ONNX model and test data") + parser.add_argument("--inp_shape", type=int, nargs="+", default=[1, 15, 128, 240]) + parser.add_argument( + "--offset_shape", type=int, nargs="+", default=[1, 18, 128, 240] + ) + parser.add_argument("--inplanes", type=int, nargs="+", default=15) + parser.add_argument("--outplanes", type=int, nargs="+", default=15) + parser.add_argument("--kernel_size", type=int, nargs="+", default=3) + parser.add_argument("--stride", type=int, nargs="+", default=1) + parser.add_argument("--padding", type=int, nargs="+", default=1) + parser.add_argument("--dilation", type=int, nargs="+", default=1) + parser.add_argument("--deformable_groups", type=int, nargs="+", default=1) + args = parser.parse_args() + + export( + args.inplanes, + args.outplanes, + args.kernel_size, + args.stride, + args.padding, + args.dilation, + args.deformable_groups, + 
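+        # Each kernel element consumes 2 offset channels (x and y), so the default 3x3 kernel
+        # expects 2 * 3 * 3 = 18 channels in the offset tensor, which is why --offset_shape
+        # defaults to [1, 18, 128, 240].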
args.inp_shape, + args.offset_shape, + ) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py new file mode 100644 index 00000000000..252c6c61207 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py @@ -0,0 +1,45 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .fft import FFT + + +class MyModel(nn.Module): + def __init__(self, inverse, centred, dims): + super(MyModel, self).__init__() + self.inverse = inverse + self.centred = centred + self.dims = dims + self.fft = FFT() + + def forward(self, x): + return self.fft.apply(x, self.inverse, self.centred, self.dims) + + +def export(shape, inverse, centered, dims): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel(inverse, centered, dims) + inp = Variable(torch.randn(shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, inp, 'model.onnx', + input_names=['input'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH) + + ref = model(inp) + np.save('inp', inp.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8, 2]) + args = parser.parse_args() + + export(args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py new file mode 100644 index 00000000000..ccc6c872bd5 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py @@ -0,0 +1,73 @@ +import torch +from packaging import version +from typing import List, Tuple, Union + +def roll( + data: torch.Tensor, + shift: Union[int, Union[Tuple[int, ...], List[int]]], + dims: Union[int, Union[Tuple, List]], +) -> torch.Tensor: + """ + Similar to numpy roll but applies to pytorch tensors. + Parameters + ---------- + data : torch.Tensor + shift: tuple, int + dims : tuple, list or int + + Returns + ------- + torch.Tensor + """ + if isinstance(shift, (tuple, list)) and isinstance(dims, (tuple, list)): + if len(shift) != len(dims): + raise ValueError(f"Length of shifts and dimensions should be equal. 
Got {len(shift)} and {len(dims)}.") + for curr_shift, curr_dim in zip(shift, dims): + data = roll(data, curr_shift, curr_dim) + return data + dim_index = dims + shift = shift % data.size(dims) + + if shift == 0: + return data + left_part = data.narrow(dim_index, 0, data.size(dims) - shift) + right_part = data.narrow(dim_index, data.size(dims) - shift, shift) + return torch.cat([right_part, left_part], dim=dim_index) + +def fftshift(data: torch.Tensor, dims) -> torch.Tensor: + shift = [data.size(curr_dim) // 2 for curr_dim in dims] + return roll(data, shift, dims) + +def ifftshift(data: torch.Tensor, dims) -> torch.Tensor: + shift = [(data.size(curr_dim) + 1) // 2 for curr_dim in dims] + return roll(data, shift, dims) + +class FFT(torch.autograd.Function): + @staticmethod + def symbolic(g, x, inverse, centered, dims): + dims = torch.tensor(dims) + dims = g.op("Constant", value_t=dims) + + return g.op('FFT', x, dims, inverse_i=inverse, centered_i=centered) + + @staticmethod + def forward(self, x, inverse, centered, dims): + # https://pytorch.org/docs/stable/torch.html#torch.fft + if centered: + x = ifftshift(x, dims) + + if version.parse(torch.__version__) >= version.parse("1.8.0"): + func = torch.fft.ifftn if inverse else torch.fft.fftn + x = torch.view_as_complex(x) + y = func(x, dim=dims, norm="ortho") + y = torch.view_as_real(y) + else: + signal_ndim = max(dims) + assert dims == list(range(1, signal_ndim + 1)) + func = torch.ifft if inverse else torch.fft + y = func(input=x, signal_ndim=signal_ndim, normalized=True) + + if centered: + y = fftshift(y, dims) + + return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py new file mode 100644 index 00000000000..fe3098d72f7 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py @@ -0,0 +1,50 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .grid_sample import GridSample + + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.grid_sample = GridSample() + + def forward(self, x, grid): + return self.grid_sample.apply(x, grid) + + +def export(inp_shape=[5, 3, 6, 9], grid_shape=[5, 6, 9, 2]): + np.random.seed(324) + torch.manual_seed(32) + + if inp_shape[2] != grid_shape[1]: + raise Exception('Input height (got {}) should be equal to grid height (got {})'.format(inp_shape[2], grid_shape[1])) + if inp_shape[3] != grid_shape[2]: + raise Exception('Input width (got {}) should be equal to grid width (got {})'.format(inp_shape[3], grid_shape[2])) + + model = MyModel() + inp = Variable(torch.randn(inp_shape)) + grid = torch.Tensor(np.random.uniform(low=-2, high=2, size=grid_shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, (inp, grid), 'model.onnx', + input_names=['input', 'input1'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp, grid) + np.save('inp', inp.detach().numpy()) + np.save('inp1', grid.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--inp_shape', type=int, nargs='+', default=[5, 3, 6, 9]) + 
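+    # The sampling grid is laid out as (N, H, W, 2); export() raises if its H or W differ from
+    # the input's spatial size. A minimal run, assuming the repository root as working directory
+    # (the relative import requires module execution):
+    #   python3 -m examples.grid_sample.export_model --inp_shape 5 3 6 9 --grid_shape 5 6 9 2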
parser.add_argument('--grid_shape', type=int, nargs='+', default=[5, 6, 9, 2]) + args = parser.parse_args() + + export(args.inp_shape, args.grid_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py new file mode 100644 index 00000000000..f69fb7177ae --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py @@ -0,0 +1,12 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class GridSample(torch.autograd.Function): + @staticmethod + def symbolic(g, x, grid): + return g.op('GridSample', x, grid) + + @staticmethod + def forward(self, x, grid): + return F.grid_sample(x, grid, 'bilinear', 'zeros', True) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py new file mode 100644 index 00000000000..9f2467b720a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py @@ -0,0 +1,69 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .sparse_conv import SparseConvONNX, SparseConvTransposeONNX + + +def export(num_inp_points, num_out_points, max_grid_extent, in_channels, + filters, kernel_size, normalize, transpose): + np.random.seed(324) + torch.manual_seed(32) + + if transpose: + sparse_conv = SparseConvTransposeONNX(in_channels=in_channels, + filters=filters, + kernel_size=kernel_size, + use_bias=False, + normalize=False) + else: + sparse_conv = SparseConvONNX(in_channels=in_channels, + filters=filters, + kernel_size=kernel_size, + use_bias=False, + normalize=False) + + # Generate a list of unique positions and add a mantissa + def gen_pos(num_points): + inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) + inp_pos = np.unique(inp_pos, axis=0).astype(np.float32) + inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) + return inp_pos + + inp_pos = gen_pos(num_inp_points) + out_pos = gen_pos(num_out_points) if num_out_points else inp_pos + + features = torch.randn([inp_pos.shape[0], in_channels]) + + voxel_size = torch.tensor(1.0) + sparse_conv.eval() + + new_kernel = torch.randn(sparse_conv.state_dict()["kernel"].shape) + sparse_conv.load_state_dict({"kernel": new_kernel, + "offset": sparse_conv.state_dict()["offset"]}) + + with torch.no_grad(): + torch.onnx.export(sparse_conv, (features, inp_pos, out_pos, voxel_size), 'model.onnx', + input_names=['input', 'input1', 'input2', 'voxel_size'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = sparse_conv(features, inp_pos, out_pos, voxel_size) + np.save('inp', features.detach().numpy()) + np.save('inp1', inp_pos.detach().numpy()) + np.save('inp2', out_pos.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--num_points', type=int) + parser.add_argument('--max_grid_extent', type=int) + parser.add_argument('--in_channels', type=int) + parser.add_argument('--filters', type=int) + 
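+    # kernel_size is forwarded to Open3D's SparseConv; the tests in tests/run_tests.py exercise
+    # 3-D sizes such as [3, 3, 3] and [5, 5, 5].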
parser.add_argument('--kernel_size', type=int) + args = parser.parse_args() + + export(args.num_points, args.max_grid_extent, + args.in_channels, args.filters, args.kernel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py new file mode 100644 index 00000000000..54f4dbb309f --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py @@ -0,0 +1,55 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from open3d.ml.torch.layers import SparseConv, SparseConvTranspose + +class SparseConvFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): + kernel = cls.state_dict()["kernel"] + offset = cls.state_dict()["offset"] + kernel = g.op("Constant", value_t=kernel) + offset = g.op("Constant", value_t=offset) + return g.op("SparseConv", feat, in_pos, out_pos, kernel, offset) + + @staticmethod + def forward(self, cls, feat, in_pos, out_pos, voxel_size): + return cls.origin_forward(feat, in_pos, out_pos, voxel_size) + + +class SparseConvONNX(SparseConv): + """ + This is a support class which helps export network with SparseConv in ONNX format. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + + def forward(self, feat, in_pos, out_pos, voxel_size): + return SparseConvFunc.apply(self, feat, in_pos, out_pos, voxel_size) + + +class SparseConvTransposeFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): + kernel = cls.state_dict()["kernel"] + offset = cls.state_dict()["offset"] + kernel = g.op("Constant", value_t=kernel) + offset = g.op("Constant", value_t=offset) + return g.op("SparseConvTranspose", feat, in_pos, out_pos, kernel, offset) + + @staticmethod + def forward(self, cls, feat, in_pos, out_pos, voxel_size): + return cls.origin_forward(feat, in_pos, out_pos, voxel_size) + + +class SparseConvTransposeONNX(SparseConvTranspose): + """ + This is a support class which helps export network with SparseConvTranspose in ONNX format. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + + def forward(self, feat, in_pos, out_pos, voxel_size): + return SparseConvTransposeFunc.apply(self, feat, in_pos, out_pos, voxel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md new file mode 100644 index 00000000000..c46e767975d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md @@ -0,0 +1,47 @@ +Guide of how to enable PyTorch `nn.MaxUnpool2d` in Intel OpenVINO. 
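+The recipe below (a custom `symbolic` function for the PyTorch to ONNX export, a Model Optimizer extension, and a CPU kernel) is the same pattern the other layers in this repository follow.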
+ + +## Description +There are two problems with OpenVINO and MaxUnpool at the moment of this guide creation: + +* OpenVINO does not have Unpooling kernels +* PyTorch -> ONNX conversion is unimplemented for `nn.MaxUnpool2d` + +So following this guide you will learn +* How to perform PyTorch -> ONNX conversion for unsupported layers +* How to convert ONNX to OpenVINO Intermediate Respresentation (IR) with extensions +* How to write custom CPU layers in OpenVINO + +## Get ONNX model + +MaxUnpool layer in PyTorch takes two inputs - input `features` from any layer and `indices` after MaxPool layer: + +```python +self.pool = nn.MaxPool2d(2, stride=2, return_indices=True) +self.unpool = nn.MaxUnpool2d(2, stride=2) + +output, indices = self.pool(x) +# ... +unpooled = self.unpool(features, indices) +``` + +If your version of PyTorch does not support ONNX model conversion with MaxUnpool, replace every unpool layer definition +```python +self.unpool = nn.MaxUnpool2d(2, stride=2) +``` +to +```python +self.unpool = Unpool2d() +``` + +where `Unpool2d` defined in [unpool.py](./unpool.py). Also, replace op usage from + +```python +self.unpool(features, indices) +``` +to +```python +self.unpool.apply(features, indices) +``` + +See complete example in [export_model.py](./export_model.py). diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py new file mode 100644 index 00000000000..e229e47adc3 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py @@ -0,0 +1,56 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .unpool import Unpool2d + +np.random.seed(324) +torch.manual_seed(32) + +class MyModel(nn.Module): + def __init__(self, mode): + super(MyModel, self).__init__() + self.mode = mode + self.pool = nn.MaxPool2d(2, stride=2, return_indices=True) + self.conv1 = nn.Conv2d(3, 4, kernel_size=1, stride=1) + self.conv2 = nn.Conv2d(4, 4, kernel_size=1, stride=1) + self.unpool = Unpool2d() + + def forward(self, x): + y = self.conv1(x) + output, indices = self.pool(y) + conv = self.conv2(output) + if self.mode == 'default': + return self.unpool.apply(conv, indices) + elif self.mode == 'dynamic_size': + return self.unpool.apply(conv, indices, x) + else: + raise Exception('Unknown mode: ' + self.mode) + + +def export(mode, shape=[5, 3, 6, 8]): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel(mode) + inp = Variable(torch.randn(shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, inp, 'model.onnx', + input_names=['input'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp) + np.save('inp', inp.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--mode', choices=['default', 'dynamic_size'], help='Specify Unpooling behavior') + parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8]) + args = parser.parse_args() + export(args.mode, args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py new file mode 100644 index 00000000000..52f961fe2cc --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py @@ -0,0 +1,17 @@ +import torch +import torch.nn as nn + +class Unpool2d(torch.autograd.Function): + @staticmethod + def symbolic(g, x, indices, output_size=None): + if output_size: + return g.op('Unpooling', x, indices, output_size) + else: + return g.op('Unpooling', x, indices) + + @staticmethod + def forward(self, x, indices, output_size=None): + if not output_size is None: + return nn.MaxUnpool2d(2, stride=2)(x, indices, output_size=output_size.size()) + else: + return nn.MaxUnpool2d(2, stride=2)(x, indices) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py new file mode 100644 index 00000000000..8e52a177bfd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py @@ -0,0 +1,46 @@ +# mo_extensions/front/onnx/max_unpool.py +import numpy as np + +from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph +from openvino.tools.mo.graph.graph import Graph +from mo_extensions.ops.MaxPoolGrad import MaxPoolGrad +from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr + +class MaxUnpool(FrontReplacementSubgraph): + enabled = True + + def pattern(self): + return dict( + nodes=[ + ('max_pool0', dict(op='MaxPool')), + ('max_pool1', dict(op='MaxPool')), + ('slice', dict(op='AttributedSlice')), + ('sub', dict(op='Sub')), + ('unpool', dict(op='Unpooling')), + ], + edges=[ + ('max_pool1', 'slice'), + ('max_pool0', 'sub', {'in': 0}), + ('slice', 'sub', {'in': 1}), + ('sub', 'unpool', {'in': 1}), + ]) + + @staticmethod + def replace_sub_graph(graph: Graph, match: dict): + max_pool = match['max_pool0'] + max_pool_input = max_pool.in_port(0).get_source().node + unpool = match['unpool'] + unpool_input = unpool.in_port(0).get_source().node + + max_pool.out_port(1).disconnect() + + # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] + inputs = [max_pool_input, max_pool, unpool_input] + + res = MaxPoolGrad(graph, dict(name=unpool.name + '/fused')).create_node(inputs) + unpool.out_port(0).get_connection().set_source(res.out_port(0)) + + if len(unpool.in_ports()) == 3: + unpool.in_port(2).get_source().connect(res.in_port(3)) + else: + max_pool_input.out_port(0).connect(res.in_port(3)) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py new file mode 100644 index 00000000000..4011e697695 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py @@ -0,0 +1,24 @@ +# mo_extensions/ops/MaxPoolGrad.py +import numpy as np +from openvino.tools.mo.graph.graph import Node, Graph +from openvino.tools.mo.ops.op import Op + +def shape_infer(node): + # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] + assert(len(node.in_nodes()) == 4) + node.out_node(0).shape = node.in_node(0).shape + node.out_node(0).shape[2] = node.in_node(3).shape[2] + node.out_node(0).shape[3] = 
node.in_node(3).shape[3] + +class MaxPoolGrad(Op): + op = 'MaxPoolGrad' + enabled = True + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'type': __class__.op, + 'op': __class__.op, + 'in_ports_count': 4, + 'out_ports_count': 1, + 'infer': shape_infer + }, attrs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py new file mode 100644 index 00000000000..ccef5c08085 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py @@ -0,0 +1,23 @@ +import os +import sys + +def get_extensions_path(): + lib_name = 'user_cpu_extension' + if sys.platform == 'win32': + lib_name += '.dll' + elif sys.platform == 'linux': + lib_name = 'lib' + lib_name + '.so' + else: + lib_name = 'lib' + lib_name + '.dylib' + return os.path.join(os.path.dirname(__file__), lib_name) + + +# This is a dummy procedure which instantiates onnx_importer library preloading +try: + import io + from openvino.inference_engine import IECore + ie = IECore() + buf = io.BytesIO() + ie.read_network(buf.getvalue(), b"", init_from_buffer=True) +except Exception: + pass diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py new file mode 100644 index 00000000000..aa3a9d6fb11 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +import os +from setuptools import setup + +if not 'VERSION' in os.environ: + raise Exception('Specify package version by environment variable') + +if not 'EXT_LIB' in os.environ: + raise Exception('Specify environment variable with a path to extensions library') + +setup(name='openvino-extensions', + version=os.environ['VERSION'], + author='Dmitry Kurtaev', + url='https://github.com/dkurt/openvino_pytorch_layers', + packages=['openvino_extensions'], + data_files=[('../../openvino_extensions', [os.environ['EXT_LIB']])], + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + ], +) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt new file mode 100644 index 00000000000..0ccfb5af363 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt @@ -0,0 +1,5 @@ +torch==1.8.1 +torchvision==0.9.1 +open3d==0.14.1 +tensorboard +pytest diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py new file mode 100644 index 00000000000..4cda23cbf2e --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py @@ -0,0 +1,147 @@ +# NOTE: import order is critical for now: extensions, openvino and only then numpy +from openvino_extensions import get_extensions_path +from openvino.runtime import Core + +import subprocess +import pytest +from pathlib import Path + +import numpy as np + +def convert_model(): + subprocess.run(['mo', + '--input_model=model.onnx', + # 
'--extension', "user_ie_extensions/build/libuser_cpu_extension.so"], + '--extension', get_extensions_path()], + check=True) + +def run_test(convert_ir=True, test_onnx=False, num_inputs=1, threshold=1e-5): + if convert_ir and not test_onnx: + convert_model() + + inputs = {} + shapes = {} + for i in range(num_inputs): + suffix = '{}'.format(i if i > 0 else '') + data = np.load('inp' + suffix + '.npy') + inputs['input' + suffix] = data + shapes['input' + suffix] = data.shape + + ref = np.load('ref.npy') + + ie = Core() + # ie.add_extension("user_ie_extensions/build/libuser_cpu_extension.so") + ie.add_extension(get_extensions_path()) + # ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU') + + net = ie.read_model('model.onnx' if test_onnx else 'model.xml') + net.reshape(shapes) + exec_net = ie.compile_model(net, 'CPU') + + out = exec_net.infer_new_request(inputs) + out = next(iter(out.values())) + + assert ref.shape == out.shape + diff = np.max(np.abs(ref - out)) + assert diff <= threshold + + +# def test_unpool(): +# from examples.unpool.export_model import export +# export(mode='default') +# run_test() + + +# def test_unpool_reshape(): +# from examples.unpool.export_model import export +# export(mode='dynamic_size', shape=[5, 3, 6, 9]) +# run_test() + +# export(mode='dynamic_size', shape=[4, 3, 17, 8]) +# run_test(convert_ir=False) + +@pytest.mark.parametrize("shape", [[5, 120, 2], [4, 240, 320, 2], [3, 16, 240, 320, 2], [4, 5, 16, 31, 2]]) +@pytest.mark.parametrize("inverse", [False, True]) +@pytest.mark.parametrize("centered", [False, True]) +@pytest.mark.parametrize("test_onnx", [False, True]) +@pytest.mark.parametrize("dims", [[1], [1, 2], [2, 3]]) +def test_fft(shape, inverse, centered, test_onnx, dims): + from examples.fft.export_model import export + + if len(shape) == 3 and dims != [1] or \ + len(shape) == 4 and dims == [2, 3] or \ + len(shape) == 5 and dims == [1] or \ + centered and len(dims) != 2: + pytest.skip("unsupported configuration") + + export(shape, inverse, centered, dims) + run_test(test_onnx=test_onnx) + + +@pytest.mark.parametrize("test_onnx", [False, True]) +def test_grid_sample(test_onnx): + from examples.grid_sample.export_model import export + + export() + run_test(num_inputs=2, test_onnx=test_onnx) + + +@pytest.mark.parametrize("shape", [[3, 2, 4, 8, 2], [3, 1, 4, 8, 2]]) +@pytest.mark.parametrize("test_onnx", [False, True]) +def test_complex_mul(shape, test_onnx): + from examples.complex_mul.export_model import export + + export(other_shape=shape) + run_test(num_inputs=2, test_onnx=test_onnx) + + +@pytest.mark.parametrize("in_channels", [1, 3]) +@pytest.mark.parametrize("filters", [1, 4]) +@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5], [2, 2, 2]]) +@pytest.mark.parametrize("normalize", [False, True]) +@pytest.mark.parametrize("out_pos", [None, 16]) +def test_sparse_conv(in_channels, filters, kernel_size, normalize, out_pos): + from examples.sparse_conv.export_model import export + + export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, + filters=filters, kernel_size=kernel_size, normalize=normalize, + transpose=False) + run_test(num_inputs=3, test_onnx=True, threshold=1e-4) + + +@pytest.mark.parametrize("in_channels", [1, 3]) +@pytest.mark.parametrize("filters", [1, 4]) +@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5]]) +@pytest.mark.parametrize("normalize", [False]) +@pytest.mark.parametrize("out_pos", [None, 16]) +def test_sparse_conv_transpose(in_channels, 
filters, kernel_size, normalize, out_pos): + from examples.sparse_conv.export_model import export + + export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, + filters=filters, kernel_size=kernel_size, normalize=normalize, + transpose=True) + run_test(num_inputs=3, test_onnx=True, threshold=1e-4) + + +def test_calculate_grid(): + from examples.calculate_grid.export_model import export + export(num_points=10, max_grid_extent=5) + run_test(test_onnx=True) + + +def test_deformable_conv(): + from examples.deformable_conv.export_model import export + + export( + inplanes=15, + outplanes=15, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + deformable_groups=1, + inp_shape=[1, 15, 128, 240], + offset_shape=[1, 18, 128, 240], + ) + run_test(num_inputs=2, threshold=2e-5) + run_test(num_inputs=2, test_onnx=True, threshold=2e-5) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt new file mode 100644 index 00000000000..f5b0576a26d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt @@ -0,0 +1,27 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# [cmake:extension] +set(CMAKE_CXX_STANDARD 11) + +set(TARGET_NAME "user_cpu_extension") + +find_package(OpenVINO REQUIRED) +find_package(OpenCV REQUIRED COMPONENTS core) +# find_package(TBB REQUIRED tbb tbbmalloc) + +file(GLOB_RECURSE SRC *.cpp) + +add_library(${TARGET_NAME} SHARED ${SRC}) + +target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_OPENVINO_EXTENSION_API) + +if (OpenCV_FOUND) + target_include_directories(${TARGET_NAME} PRIVATE ${OpenCV_INCLUDE_DIRS}) +endif() + +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime + # ${TBB_IMPORTED_TARGETS} + ) +# [cmake:extension] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp new file mode 100644 index 00000000000..5545c737013 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "calculate_grid.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +CalculateGrid::CalculateGrid(const ov::Output& inp_pos) : Op({inp_pos}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void CalculateGrid::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr CalculateGrid::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 1, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool CalculateGrid::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate]
+bool CalculateGrid::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
+    const float* inpPos = reinterpret_cast<const float*>(inputs[0].data());
+    float* out = reinterpret_cast<float*>(outputs[0].data());
+
+    std::set<std::tuple<int, int, int> > outPos;
+
+    const size_t numPoints = inputs[0].get_shape()[0];
+    static const std::vector<std::vector<int> > filters {{-1, -1, -1}, {-1, -1, 0}, {-1, 0, -1},
+                                                         {-1, 0, 0}, {0, -1, -1}, {0, -1, 0},
+                                                         {0, 0, -1}, {0, 0, 0}};
+
+    std::vector<int> pos(3);
+    for (size_t i = 0; i < numPoints; ++i) {
+        for (size_t j = 0; j < filters.size(); ++j) {
+            bool isValid = true;
+            for (size_t k = 0; k < 3; ++k) {
+                int val = static_cast<int>(inpPos[i * 3 + k]) + filters[j][k];
+                if (val < 0 || val % 2) {
+                    isValid = false;
+                    break;
+                }
+                pos[k] = val;
+            }
+            if (isValid)
+                outPos.insert(std::make_tuple(pos[0], pos[1], pos[2]));
+        }
+    }
+
+    int i = 0;
+    for (const auto it : outPos) {
+        out[i * 3] = 0.5f + std::get<0>(it);
+        out[i * 3 + 1] = 0.5f + std::get<1>(it);
+        out[i * 3 + 2] = 0.5f + std::get<2>(it);
+        i += 1;
+    }
+    memset(out + i * 3, 0, sizeof(float) * 3 * (numPoints - i));
+    out[i * 3] = -1.0f;
+    return true;
+}
+
+bool CalculateGrid::has_evaluate() const {
+    return true;
+}
+//! [op:evaluate]
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp
new file mode 100644
index 00000000000..b436f2d39d0
--- /dev/null
+++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp
@@ -0,0 +1,29 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+//! [op:common_include]
+#include <openvino/op/op.hpp>
+//! [op:common_include]
+
+//! [op:header]
+namespace TemplateExtension {
+
+class CalculateGrid : public ov::op::Op {
+public:
+    OPENVINO_OP("CalculateGrid");
+
+    CalculateGrid() = default;
+    CalculateGrid(const ov::Output<ov::Node>& inp_pos);
+    void validate_and_infer_types() override;
+    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override;
+    bool visit_attributes(ov::AttributeVisitor& visitor) override;
+
+    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
+//! [op:header]
+
+} // namespace TemplateExtension
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp
new file mode 100644
index 00000000000..86f4af2e464
--- /dev/null
+++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp
@@ -0,0 +1,89 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "complex_mul.hpp"
+// #include
+#include
+
+using namespace TemplateExtension;
+
+//! [op:ctor]
+ComplexMultiplication::ComplexMultiplication(
+    const ov::Output<ov::Node>& inp0,
+    const ov::Output<ov::Node>& inp1) : Op({inp0, inp1}) {
+    constructor_validate_and_infer_types();
+}
+//! [op:ctor]
+
+//! [op:validate]
+void ComplexMultiplication::validate_and_infer_types() {
+    auto outShape = get_input_partial_shape(0);
+    set_output_type(0, get_input_element_type(1), outShape);
+}
+//! [op:validate]
+
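+// Both inputs hold interleaved (real, imaginary) pairs in their last dimension.
+// As implemented in evaluate() below, the second input must either have the same
+// number of channels as the first or a single channel, which is then broadcast
+// across the channels of the first input.
+
+//!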
[op:copy] +std::shared_ptr ComplexMultiplication::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool ComplexMultiplication::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! [op:evaluate] +bool ComplexMultiplication::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* inp0 = reinterpret_cast(inputs[0].data()); + const float* inp1 = reinterpret_cast(inputs[1].data()); + float* out = reinterpret_cast(outputs[0].data()); + + size_t channels0 = inputs[0].get_shape()[1]; + size_t channels1 = inputs[1].get_shape()[1]; + size_t batch = inputs[0].get_shape()[0]; + size_t spatialSize = inputs[0].get_shape()[2] * inputs[0].get_shape()[3]; + + // x1 = x_r * y_r - x_i * y_i + // x2 = x_r * y_i + x_i * y_r + if (channels0 == channels1) + // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { + for (size_t ch = 0; ch < channels0 * batch; ++ch) { + for (int i = 0; i < spatialSize; ++i) { + int outIdx = (ch * spatialSize + i) * 2; + float real0 = inp0[outIdx]; + float imag0 = inp0[outIdx + 1]; + float real1 = inp1[outIdx]; + float imag1 = inp1[outIdx + 1]; + out[outIdx] = real0 * real1 - imag0 * imag1; + out[outIdx + 1] = real0 * imag1 + imag0 * real1; + } + } + else if (channels1 == 1) + // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { + for (size_t ch = 0; ch < channels0 * batch; ++ch) { + int b = ch / channels0; + for (int i = 0; i < spatialSize; ++i) { + int outIdx = (ch * spatialSize + i) * 2; + int inpIdx = (b * spatialSize + i) * 2; + float real0 = inp0[outIdx]; + float imag0 = inp0[outIdx + 1]; + float real1 = inp1[inpIdx]; + float imag1 = inp1[inpIdx + 1]; + out[outIdx] = real0 * real1 - imag0 * imag1; + out[outIdx + 1] = real0 * imag1 + imag0 * real1; + } + } + else + IE_THROW() << "Wrong number of channels for second input!"; + + return true; +} + +bool ComplexMultiplication::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp new file mode 100644 index 00000000000..9dd487f3a0a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! [op:header] +namespace TemplateExtension { + +class ComplexMultiplication : public ov::op::Op { +public: + OPENVINO_OP("ComplexMultiplication"); + + ComplexMultiplication() = default; + ComplexMultiplication(const ov::Output& inp0, + const ov::Output& inp1); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! 
[op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp new file mode 100644 index 00000000000..86c8185097f --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp @@ -0,0 +1,372 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fft.hpp" + +#include +#include
+#include + +using namespace TemplateExtension; + +std::unique_ptr so; +using cvCreateMatHeaderF = CvMat*(int, int, int); +using cvSetDataF = void(CvArr*, void*, int); +using cvReleaseMatF = void(CvMat**); +using cvDftF = void(const CvArr*, CvArr*, int, int); +using cvScaleF = void(const CvArr*, CvArr*, double, double); +using cvCloneMatF = CvMat*(const CvMat*); +using cvCopyF = void(const CvArr*, const CvArr*, const CvArr*); +using cvInitMatHeaderF = CvMat*(CvMat*, int, int, int, void*, int); +using cvGetRawDataF = void(const CvArr*, uchar**, int* step, CvSize* roi_size); +using cvReshapeF = CvMat*(const CvArr*, CvMat*, int, int); +using cvCreateDataF = void(CvArr*); +using cvReleaseDataF = void(CvArr*); + +bool loadOpenCV() { + static bool loaded = false; + if (!loaded) { + loaded = true; + try { +#ifdef _WIN32 + so.reset(new InferenceEngine::details::SharedObjectLoader("opencv_core.dll")); +#elif defined(__APPLE__) + so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.dylib")); +#else + so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.so")); +#endif + } catch (InferenceEngine::details::InferenceEngineException& ex) { + return false; + } + } + return loaded; +} + +void fftshift(CvMat* src, bool inverse) { + static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); + static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); + static auto cvInitMatHeader = reinterpret_cast(so->get_symbol("cvInitMatHeader")); + static auto cvGetRawData = reinterpret_cast(so->get_symbol("cvGetRawData")); + static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); + + + // tl | tr br | bl + // ---+--- -> ---+--- + // bl | br tr | tl + + float* data; + int step; + CvSize size; + cvGetRawData(src, (uchar**)&data, &step, &size); + + int height = size.height; + int width = size.width; + int h2 = height / 2; + int w2 = width / 2; + + if (height % 2 || width % 2) { + // Swap rows. + CvMat* srcTop = new CvMat(); + CvMat* srcBot = new CvMat(); + CvMat* dstTop = new CvMat(); + CvMat* dstBot = new CvMat(); + int topH = inverse ? h2 : (h2 + height % 2); + int botH = height - topH; + cvInitMatHeader(srcTop, topH, width, CV_32FC2, data, step); + cvInitMatHeader(srcBot, botH, width, CV_32FC2, data + topH * width * 2, step); + cvInitMatHeader(dstTop, topH, width, CV_32FC2, data + botH * width * 2, step); + cvInitMatHeader(dstBot, botH, width, CV_32FC2, data, step); + + CvMat* tmp = cvCloneMat(srcTop); + cvCopy(srcBot, dstBot, 0); + cvCopy(tmp, dstTop, 0); + + cvReleaseMat(&tmp); + delete srcTop; + delete srcBot; + delete dstTop; + delete dstBot; + + // Swap columns. + CvMat* srcL = new CvMat(); + CvMat* srcR = new CvMat(); + CvMat* dstL = new CvMat(); + CvMat* dstR = new CvMat(); + int leftW = inverse ? 
w2 : (w2 + width % 2); + int rightW = width - leftW; + + cvInitMatHeader(srcL, height, leftW, CV_32FC2, data, step); + cvInitMatHeader(srcR, height, rightW, CV_32FC2, data + leftW * 2, step); + cvInitMatHeader(dstL, height, leftW, CV_32FC2, data + rightW * 2, step); + cvInitMatHeader(dstR, height, rightW, CV_32FC2, data, step); + + tmp = cvCloneMat(srcL); + cvCopy(srcR, dstR, 0); + cvCopy(tmp, dstL, 0); + + cvReleaseMat(&tmp); + delete srcL; + delete srcR; + delete dstL; + delete dstR; + + return; + } + + CvMat* tl = new CvMat(); + CvMat* tr = new CvMat(); + CvMat* bl = new CvMat(); + CvMat* br = new CvMat(); + + cvInitMatHeader(tl, h2, w2, CV_32FC2, data, step); + cvInitMatHeader(tr, h2, w2, CV_32FC2, data + width, step); + cvInitMatHeader(bl, h2, w2, CV_32FC2, data + height * width, step); + cvInitMatHeader(br, h2, w2, CV_32FC2, data + height * width + width, step); + + CvArr* mask = 0; + CvMat* tmp = cvCloneMat(tl); + cvCopy(br, tl, mask); + cvCopy(tmp, br, mask); + + cvCopy(tr, tmp, mask); + cvCopy(bl, tr, mask); + cvCopy(tmp, bl, mask); + + cvReleaseMat(&tmp); + + delete tl; + delete tr; + delete bl; + delete br; +} + +//! [op:ctor] +FFT::FFT(const ov::Output& inp, + const ov::Output& dims, + bool inverse, + bool centered) : Op({inp, dims}) { + loadOpenCV(); + constructor_validate_and_infer_types(); + this->inverse = inverse; + this->centered = centered; +} +//! [op:ctor] + +//! [op:validate] +void FFT::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr FFT::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), inverse, centered); +} +//! [op:copy] + +//! [op:visit_attributes] +bool FFT::visit_attributes(ov::AttributeVisitor& visitor) { + int inverse_i = static_cast(inverse); + int centered_i = static_cast(centered); + visitor.on_attribute("inverse", inverse_i); + visitor.on_attribute("centered", centered_i); + inverse = static_cast(inverse_i); + centered = static_cast(centered_i); + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool FFT::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + static auto cvSetData = reinterpret_cast(so->get_symbol("cvSetData")); + static auto cvCreateMatHeader = reinterpret_cast(so->get_symbol("cvCreateMatHeader")); + static auto cvDFT = reinterpret_cast(so->get_symbol("cvDFT")); + static auto cvScale = reinterpret_cast(so->get_symbol("cvConvertScale")); + static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); + static auto cvReshape = reinterpret_cast(so->get_symbol("cvReshape")); + static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); + static auto cvCreateData = reinterpret_cast(so->get_symbol("cvCreateData")); + static auto cvReleaseData = reinterpret_cast(so->get_symbol("cvReleaseData")); + static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); + + float* inpData = reinterpret_cast(inputs[0].data()); + + if (inputs[1].get_element_type() != ov::element::i32) + IE_THROW() << "Unexpected dims type: " << inputs[1].get_element_type(); + + int32_t* signalDimsData = reinterpret_cast(inputs[1].data()); + float* outData = reinterpret_cast(outputs[0].data()); + std::vector dims = inputs[0].get_shape(); + const size_t numSignalDims = inputs[1].get_shape()[0]; + + if (!(dims.size() == 3 && (numSignalDims == 1 && signalDimsData[0] == 1) || + dims.size() == 4 && ((numSignalDims == 1 && signalDimsData[0] == 1) || + (numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2)) || + dims.size() == 5 && ((numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) || + (numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3)))) { + std::ostringstream ss; + for (size_t i = 0; i < numSignalDims; ++i) + ss << signalDimsData[i] << " "; + IE_THROW() << "Unsupported configuration: Input dims " << dims.size() << " and signal dims " << ss.str(); + } + + const int batch = dims[0]; + + if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { + const int channels = dims[1]; + int rows = dims[2]; + int cols = dims[3]; + const int planeSize = channels * rows * cols; + // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { + for (size_t d = 0; d < batch * cols; ++d) { + int b = d / cols; + int col = d % cols; + // Copy a slice from input + CvMat* inpSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); + CvMat* outSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); + cvSetData(inpSlice, reinterpret_cast(inpData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); + cvSetData(outSlice, reinterpret_cast(outData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); + + CvMat* inp_col = cvCloneMat(inpSlice); + + CvMat inp_header, *inp; + inp = cvReshape(inp_col, &inp_header, 2, channels); + + CvMat* out = cvCreateMatHeader(channels, rows, CV_32FC2); + cvCreateData(out); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(channels * rows), 0); + + if (centered) + fftshift(out, false); + + CvMat out_col_header, *out_col; + out_col = cvReshape(out, &out_col_header, 2, channels * rows); + + cvCopy(out_col, outSlice, 0); + + cvReleaseData(inp_col); + cvReleaseMat(&inp_col); + + cvReleaseData(out); + cvReleaseMat(&out); + + cvReleaseMat(&inpSlice); + cvReleaseMat(&outSlice); + } + } else if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3) { + const int channels = 
dims[1]; + int rows = dims[2]; + int cols = dims[3]; + int planeSize = rows * cols * 2; // 2 is last dimension size + // InferenceEngine::parallel_for(batch * channels, [&](size_t d) { + for (size_t d = 0; d < batch * channels; ++d) { + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 4 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { + int rows = dims[1]; + int cols = dims[2]; + int planeSize = rows * cols * 2; // 2 is last dimension size + // InferenceEngine::parallel_for(batch, [&](size_t d) { + for (size_t d = 0; d < batch; ++d) { + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 4 && numSignalDims == 1 && signalDimsData[0] == 1) { + int rows = dims[1]; + int cols = dims[2]; + + const int planeSize = rows; + // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { + for (size_t d = 0; d < batch * cols; ++d) { + int b = d / cols; + int col = d % cols; + CvMat* inp = cvCreateMatHeader(rows, 1, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, 1, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 3) { + int rows = dims[0]; + int cols = dims[1]; + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData), cols * 2 * sizeof(float)); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE | CV_DXT_ROWS, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD | CV_DXT_ROWS, 0); + cvScale(out, out, 1.0 / sqrtf(cols), 0); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + return true; +} + +bool FFT::has_evaluate() const { + return true; +} +//! 
[op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp new file mode 100644 index 00000000000..f541f91c66d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! [op:header] +namespace TemplateExtension { + +class FFT : public ov::op::Op { +public: + OPENVINO_OP("FFT"); + + FFT() = default; + FFT(const ov::Output& inp, + const ov::Output& dims, + bool inverse, + bool centered); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool inverse = false; + bool centered = false; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp new file mode 100644 index 00000000000..94fa44336f2 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp @@ -0,0 +1,125 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "grid_sample.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +GridSample::GridSample(const ov::Output& inp, + const ov::Output& grid) : Op({inp, grid}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void GridSample::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); // NC + // Grid input has a shape NxHxWx2 + auto gridShape = get_input_partial_shape(1); + outShape[2] = gridShape[1]; // H + outShape[3] = gridShape[2]; // W + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr GridSample::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool GridSample::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool GridSample::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* inpData = reinterpret_cast(inputs[0].data()); + const float* gridData = reinterpret_cast(inputs[1].data()); + float* outData = reinterpret_cast(outputs[0].data()); + + std::vector inpDims = inputs[0].get_shape(); + std::vector outDims = outputs[0].get_shape(); + + const int batch = outDims[0]; + const int channels = outDims[1]; + const int height = outDims[2]; + const int width = outDims[3]; + const int inpHeight = inpDims[2]; + const int inpWidth = inpDims[3]; + const int inpPlane = inpHeight * inpWidth; + const int outPlane = height * width; + + std::vector zerosPlane(inpDims[1] * inpDims[2] * inpDims[3], 0); + float* zeros = zerosPlane.data(); + + // InferenceEngine::parallel_for(batch, [&](int d) { + for (int d = 0; d < batch; ++d) { + const float* inp = inpData + d * channels * inpPlane; + const float* grid = gridData + d * outPlane * 2; + for (int y = 0; y < height; ++y) { + for (int x = 0; x < width; ++x) { + int offset = y * width + x; + + float input_x = 0.5f * (grid[offset * 2] + 1) * (inpWidth - 1); + int x0 = std::floor(input_x); + int x1 = x0 + 1; + + float input_y = 0.5f * (grid[offset * 2 + 1] + 1) * (inpHeight - 1); + int y0 = std::floor(input_y); + int y1 = y0 + 1; + + const float* inp_row0 = (0 <= y0 && y0 < inpHeight) ? inp + y0 * inpWidth : zeros; + const float* inp_row1 = (0 <= y1 && y1 < inpHeight) ? inp + y1 * inpWidth : zeros; + float* out = outData + d * channels * outPlane; + if ((x1 < 0 || inpWidth <= x1) && (x0 < 0 || inpWidth <= x0)) { + for (int c = 0; c < channels; ++c) { + out[offset] = 0; + out += outPlane; + } + } + else if (x1 < 0 || inpWidth <= x1) { + for (int c = 0; c < channels; ++c) { + out[offset] = inp_row0[x0] + + (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + + (input_x - x0) * (-inp_row0[x0] + + (input_y - y0) * (inp_row0[x0] - inp_row1[x0])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } + else if (x0 < 0 || inpWidth <= x0) { + for (int c = 0; c < channels; ++c) { + out[offset] = + (input_x - x0) * (inp_row0[x1] + (input_y - y0) * (inp_row1[x1] - inp_row0[x1])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } else { + for (int c = 0; c < channels; ++c) { + out[offset] = inp_row0[x0] + + (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + + (input_x - x0) * (inp_row0[x1] - inp_row0[x0] + + (input_y - y0) * (inp_row1[x1] - inp_row0[x1] - inp_row1[x0] + inp_row0[x0])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } + } + } + } + return true; +} + +bool GridSample::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp new file mode 100644 index 00000000000..be259717045 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! 
[op:header] +namespace TemplateExtension { + +class GridSample : public ov::op::Op { +public: + OPENVINO_OP("GridSample"); + + GridSample() = default; + GridSample(const ov::Output& inp, + const ov::Output& grid); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp new file mode 100644 index 00000000000..e048e5f08cd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "unpool.hpp" +#include "sparse_conv.hpp" +#include "sparse_conv_transpose.hpp" +#include "complex_mul.hpp" +#include "calculate_grid.hpp" +#include "grid_sample.hpp" +#include "fft.hpp" + +// clang-format off +//! [ov_extension:entry_point] +OPENVINO_CREATE_EXTENSIONS( + std::vector({ + // Register operation itself, required to be read from IR + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + + // Register operaton mapping, required when converted from framework model format + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>() + })); +//! [ov_extension:entry_point] +// clang-format on diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp new file mode 100644 index 00000000000..dd561626c43 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "sparse_conv.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +SparseConv::SparseConv(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void SparseConv::validate_and_infer_types() { + auto outShape = get_input_partial_shape(2); + auto kernelShape = get_input_partial_shape(3); + outShape[1] = kernelShape[4]; + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr SparseConv::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool SparseConv::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! 
[op:visit_attributes]
+
+//! [op:evaluate]
+bool SparseConv::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
+    const float* features = reinterpret_cast<const float*>(inputs[0].data());
+    const float* inpPos = reinterpret_cast<const float*>(inputs[1].data());
+    const float* outPos = reinterpret_cast<const float*>(inputs[2].data());
+    const float* kernel = reinterpret_cast<const float*>(inputs[3].data());
+    const float* offset = reinterpret_cast<const float*>(inputs[4].data());
+    float* out = reinterpret_cast<float*>(outputs[0].data());
+    memset(out, 0, outputs[0].get_byte_size());
+
+    size_t numInpPoints = inputs[1].get_shape()[0];
+    const size_t numOutPoints = inputs[2].get_shape()[0];
+    std::vector<size_t> kernelDims = inputs[3].get_shape();
+
+    // Kernel layout is DxHxWxICxOC
+    const int kd = kernelDims[0];
+    const int kh = kernelDims[1];
+    const int kw = kernelDims[2];
+    const int IC = kernelDims[3];
+    const int OC = kernelDims[4];
+
+    // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py
+    float rw = kw * 0.51f;
+    float rh = kh * 0.51f;
+    float rd = kd * 0.51f;
+
+    for (size_t i = 0; i < numInpPoints; ++i) {
+        if (inpPos[i * 3] < 0) {
+            numInpPoints = i;
+            break;
+        }
+    }
+
+    for (size_t i = 0; i < numOutPoints; ++i) {
+        const float xi = outPos[i * 3] - offset[0];
+        const float yi = outPos[i * 3 + 1] - offset[1];
+        const float zi = outPos[i * 3 + 2] - offset[2];
+
+        // Accumulate features which are inside the kernel
+        for (size_t j = 0; j < numInpPoints; ++j) {
+            const float xj = inpPos[j * 3];
+            const float yj = inpPos[j * 3 + 1];
+            const float zj = inpPos[j * 3 + 2];
+
+            if (xi - rw <= xj && xj <= xi + rw &&
+                yi - rh <= yj && yj <= yi + rh &&
+                zi - rd <= zj && zj <= zi + rd) {
+
+                const int w = std::min(static_cast<int>(xj - xi + kw * 0.5f), kw - 1);
+                const int h = std::min(static_cast<int>(yj - yi + kh * 0.5f), kh - 1);
+                const int d = std::min(static_cast<int>(zj - zi + kd * 0.5f), kd - 1);
+
+                const float* featuresOffset = features + j * IC;
+                for (size_t ic = 0; ic < IC; ++ic) {
+                    const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d)));
+                    for (size_t oc = 0; oc < OC; ++oc) {
+                        out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic];
+                    }
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool SparseConv::has_evaluate() const {
+    return true;
+}
+//! [op:evaluate]
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp
new file mode 100644
index 00000000000..8d508e725aa
--- /dev/null
+++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp
@@ -0,0 +1,33 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+//! [op:common_include]
+#include <openvino/op/op.hpp>
+//! [op:common_include]
+
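+// As implemented in sparse_conv.cpp, SparseConv takes five inputs: features [N, IC],
+// input point positions [N, 3] (a negative x coordinate marks the end of valid points),
+// output point positions [M, 3], a kernel laid out as D x H x W x IC x OC, and a
+// 3-element positional offset. The output tensor has shape [M, OC].
+
+//!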
[op:header] +namespace TemplateExtension { + +class SparseConv : public ov::op::Op { +public: + OPENVINO_OP("SparseConv"); + + SparseConv() = default; + SparseConv(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp new file mode 100644 index 00000000000..dfd8d525116 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "sparse_conv_transpose.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +SparseConvTranspose::SparseConvTranspose(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void SparseConvTranspose::validate_and_infer_types() { + auto outShape = get_input_partial_shape(2); + auto kernelShape = get_input_partial_shape(3); + outShape[1] = kernelShape[4]; + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr SparseConvTranspose::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool SparseConvTranspose::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool SparseConvTranspose::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* features = reinterpret_cast(inputs[0].data()); + const float* inpPos = reinterpret_cast(inputs[1].data()); + const float* outPos = reinterpret_cast(inputs[2].data()); + const float* kernel = reinterpret_cast(inputs[3].data()); + const float* offset = reinterpret_cast(inputs[4].data()); + float* out = reinterpret_cast(outputs[0].data()); + memset(out, 0, outputs[0].get_byte_size()); + + size_t numInpPoints = inputs[1].get_shape()[0]; + const size_t numOutPoints = inputs[2].get_shape()[0]; + std::vector kernelDims = inputs[3].get_shape(); + + // Kernel layout is DxHxWxICxOH + const int kd = kernelDims[0]; + const int kh = kernelDims[1]; + const int kw = kernelDims[2]; + const int IC = kernelDims[3]; + const int OC = kernelDims[4]; + + // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py + float rw = kw * 0.51f; + float rh = kh * 0.51f; + float rd = kd * 0.51f; + + for (size_t i = 0; i < numInpPoints; ++i) { + if (inpPos[i * 3] < 0) { + numInpPoints = i; + break; + } + } + + for (size_t i = 0; i < numOutPoints; ++i) { + const float xi = outPos[i * 3] - offset[0]; + const float yi = outPos[i * 3 + 1] - offset[1]; + const float zi = outPos[i * 3 + 2] - offset[2]; + + // Accumulate features which inside the kernel + for (size_t j = 0; j < numInpPoints; ++j) { + const float xj = inpPos[j * 3]; + const float yj = inpPos[j * 3 + 1]; + const float zj = inpPos[j * 3 + 2]; + + if (xi - rw <= xj && xj <= xi + rw && + yi - rh <= yj && yj <= yi + rh && + zi - rd <= zj && zj <= zi + rd) { + + const int w = kw - 1 - std::min(static_cast(xj - xi + kw * 0.5f), kw - 1); + const int h = kh - 1 - std::min(static_cast(yj - yi + kh * 0.5f), kh - 1); + const int d = kd - 1 - std::min(static_cast(zj - zi + kd * 0.5f), kd - 1); + + const float* featuresOffset = features + j * IC; + for (size_t ic = 0; ic < IC; ++ic) { + const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d))); + for (size_t oc = 0; oc < OC; ++oc) { + out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic]; + } + } + } + } + } + return true; +} + +bool SparseConvTranspose::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp new file mode 100644 index 00000000000..f607d5462df --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! 
[op:header] +namespace TemplateExtension { + +class SparseConvTranspose : public ov::op::Op { +public: + OPENVINO_OP("SparseConvTranspose"); + + SparseConvTranspose() = default; + SparseConvTranspose(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp new file mode 100644 index 00000000000..caae15055fb --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unpool.hpp" +// #include + +using namespace TemplateExtension; + +//! [op:ctor] +Unpool::Unpool(const ov::Output& poolInp, + const ov::Output& poolOut, + const ov::Output& inp, + const ov::Output& shape) : Op({poolInp, poolOut, inp, shape}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void Unpool::validate_and_infer_types() { + auto outShape = get_input_partial_shape(3); + auto poolInpShape = get_input_partial_shape(0).to_shape(); + outShape[0] = poolInpShape[0]; // Use only spatial dimensions from shape + outShape[1] = poolInpShape[1]; // and restore batch and channels + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr Unpool::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 4, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool Unpool::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate]
+bool Unpool::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
+    const float* poolInp = reinterpret_cast<const float*>(inputs[0].data());
+    const float* poolOut = reinterpret_cast<const float*>(inputs[1].data());
+    const float* inp = reinterpret_cast<const float*>(inputs[2].data());
+    float* out = reinterpret_cast<float*>(outputs[0].data());
+
+    std::vector<size_t> poolInpDims = inputs[0].get_shape();
+    std::vector<size_t> poolOutDims = inputs[1].get_shape();
+    std::vector<size_t> inpDims = inputs[2].get_shape();
+    std::vector<size_t> outDims = outputs[0].get_shape();
+
+    const size_t batch = poolInpDims[0];
+    const size_t channels = poolInpDims[1];
+    const size_t height = poolInpDims[2];
+    const size_t width = poolInpDims[3];
+    const size_t outHeight = outDims[2];
+    const size_t outWidth = outDims[3];
+    const size_t poolOutHeight = poolOutDims[2];
+    const size_t poolOutWidth = poolOutDims[3];
+
+    std::vector<bool> mask(inputs[1].get_size(), false);
+
+    memset(out, 0, outputs[0].get_byte_size());
+    // InferenceEngine::parallel_for(batch*channels, [&](size_t d) {
+    for (size_t d = 0; d < batch * channels; ++d) {
+        for (int y = 0; y < height; ++y) {
+            for (int x = 0; x < width; ++x) {
+                int poolOutIdx = (d * poolOutHeight + y / 2) * poolOutWidth + x / 2;
+                int poolInpIdx = (d * height + y) * width + x;
+                int dstIdx = d * outHeight * outWidth + (y * width + x);
+                if (fabs(poolInp[poolInpIdx] - poolOut[poolOutIdx]) < 1e-5f && !mask[poolOutIdx]) {
+                    out[dstIdx] = inp[poolOutIdx];
+                    mask[poolOutIdx] = true;
+                }
+            }
+        }
+    }
+    return true;
+}
+
+bool Unpool::has_evaluate() const {
+    return true;
+}
+//! [op:evaluate]
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp
new file mode 100644
index 00000000000..275a80c23f7
--- /dev/null
+++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp
@@ -0,0 +1,41 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+//! [op:common_include]
+#include <openvino/op/op.hpp>
+//! [op:common_include]
+//! [op:frontend_include]
+#ifdef OPENVINO_ONNX_FRONTEND_ENABLED
+# include
+#endif
+//! [op:frontend_include]
+
+//! [op:header]
+namespace TemplateExtension {
+
+class Unpool : public ov::op::Op {
+public:
+    OPENVINO_OP("MaxPoolGrad");
+
+#ifdef OPENVINO_ONNX_FRONTEND_ENABLED
+    OPENVINO_FRAMEWORK_MAP(onnx, "MaxPoolGrad")
+#endif
+
+    Unpool() = default;
+    Unpool(const ov::Output<ov::Node>& poolInp,
+           const ov::Output<ov::Node>& poolOut,
+           const ov::Output<ov::Node>& inp,
+           const ov::Output<ov::Node>& shape);
+    void validate_and_infer_types() override;
+    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override;
+    bool visit_attributes(ov::AttributeVisitor& visitor) override;
+
+    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override;
+    bool has_evaluate() const override;
+};
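+// The "MaxPoolGrad" framework mapping above lets the ONNX frontend import this op
+// directly; the op is registered with the runtime alongside the other custom layers
+// in ov_extension.cpp. evaluate() in unpool.cpp copies each value back to the
+// position where the 2x2 pooling maximum was found, taking batch and channel sizes
+// from the pooling input and spatial sizes from the shape input.
+//!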
[op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py deleted file mode 100644 index 8c6c6e0cb68..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/r2unet.py +++ /dev/null @@ -1,423 +0,0 @@ -import torch -from torch import nn -from torch.nn import init - -def init_weights(net, init_type='normal', gain=0.02): - def init_func(m): - classname = m.__class__.__name__ - if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): - if init_type == 'normal': - init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'kaiming': - init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - init.orthogonal_(m.weight.data, gain=gain) - else: - raise NotImplementedError(f'initialization method {init_type} is not implemented') - if hasattr(m, 'bias') and m.bias is not None: - init.constant_(m.bias.data, 0.0) - elif classname.find('BatchNorm2d') != -1: - init.normal_(m.weight.data, 1.0, gain) - init.constant_(m.bias.data, 0.0) - - print(f'initialize network with {init_type}') - net.apply(init_func) - -class conv_block(nn.Module): - def __init__(self,ch_in,ch_out): - super().__init__() - self.conv = nn.Sequential( - nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True), - nn.Conv2d(ch_out, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - - def forward(self,x): - x = self.conv(x) - return x - -class up_conv(nn.Module): - def __init__(self,ch_in,ch_out): - super().__init__() - self.up = nn.Sequential( - nn.Upsample(scale_factor=2), - nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - x = self.up(x) - return x - -class Recurrent_block(nn.Module): - def __init__(self,ch_out,t=2): - super().__init__() - self.t = t - self.ch_out = ch_out - self.conv = nn.Sequential( - nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - for i in range(self.t): - - if i==0: - x1 = self.conv(x) - - x1 = self.conv(x+x1) - return x1 - -class RRCNN_block(nn.Module): - def __init__(self,ch_in,ch_out,t=2): - super().__init__() - self.RCNN = nn.Sequential( - Recurrent_block(ch_out,t=t), - Recurrent_block(ch_out,t=t) - ) - self.Conv_1x1 = nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=1,padding=0) - - def forward(self,x): - x = self.Conv_1x1(x) - x1 = self.RCNN(x) - return x+x1 - - -class single_conv(nn.Module): - def __init__(self,ch_in,ch_out): - super().__init__() - self.conv = nn.Sequential( - nn.Conv2d(ch_in, ch_out, kernel_size=3,stride=1,padding=1,bias=True), - nn.BatchNorm2d(ch_out), - nn.ReLU(inplace=True) - ) - - def forward(self,x): - x = self.conv(x) - return x - -class Attention_block(nn.Module): - def __init__(self,F_g,F_l,F_int): - super().__init__() - self.W_g = nn.Sequential( - nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True), - nn.BatchNorm2d(F_int) - ) - - self.W_x = nn.Sequential( - nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True), - nn.BatchNorm2d(F_int) - ) - - self.psi = nn.Sequential( - nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True), - 
nn.BatchNorm2d(1), - nn.Sigmoid() - ) - - self.relu = nn.ReLU(inplace=True) - - def forward(self,g,x): - g1 = self.W_g(g) - x1 = self.W_x(x) - psi = self.relu(g1+x1) - psi = self.psi(psi) - - return x*psi - - -class U_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1): - super().__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) - self.Conv2 = conv_block(ch_in=64,ch_out=128) - self.Conv3 = conv_block(ch_in=128,ch_out=256) - self.Conv4 = conv_block(ch_in=256,ch_out=512) - self.Conv5 = conv_block(ch_in=512,ch_out=1024) - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Up_conv2 = conv_block(ch_in=128, ch_out=64) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.Conv1(x) - - x2 = self.Maxpool(x1) - x2 = self.Conv2(x2) - - x3 = self.Maxpool(x2) - x3 = self.Conv3(x3) - - x4 = self.Maxpool(x3) - x4 = self.Conv4(x4) - - x5 = self.Maxpool(x4) - x5 = self.Conv5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - d5 = torch.cat((x4,d5),dim=1) - - d5 = self.Up_conv5(d5) - - d4 = self.Up4(d5) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_conv4(d4) - - d3 = self.Up3(d4) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_conv3(d3) - - d2 = self.Up2(d3) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_conv2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - -class R2U_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1,t=2): - super().__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - self.Upsample = nn.Upsample(scale_factor=2) - - self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) - - self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - - self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - - self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - - self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.RRCNN1(x) - - x2 = self.Maxpool(x1) - x2 = self.RRCNN2(x2) - - x3 = self.Maxpool(x2) - x3 = self.RRCNN3(x3) - - x4 = self.Maxpool(x3) - x4 = self.RRCNN4(x4) - - x5 = self.Maxpool(x4) - x5 = self.RRCNN5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_RRCNN5(d5) - - d4 = self.Up4(d5) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_RRCNN4(d4) - - d3 = self.Up3(d4) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_RRCNN3(d3) - - d2 = self.Up2(d3) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_RRCNN2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - - -class AttU_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1): - super().__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - - self.Conv1 = conv_block(ch_in=img_ch,ch_out=64) - self.Conv2 = 
conv_block(ch_in=64,ch_out=128) - self.Conv3 = conv_block(ch_in=128,ch_out=256) - self.Conv4 = conv_block(ch_in=256,ch_out=512) - self.Conv5 = conv_block(ch_in=512,ch_out=1024) - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) - self.Up_conv5 = conv_block(ch_in=1024, ch_out=512) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) - self.Up_conv4 = conv_block(ch_in=512, ch_out=256) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) - self.Up_conv3 = conv_block(ch_in=256, ch_out=128) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) - self.Up_conv2 = conv_block(ch_in=128, ch_out=64) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.Conv1(x) - - x2 = self.Maxpool(x1) - x2 = self.Conv2(x2) - - x3 = self.Maxpool(x2) - x3 = self.Conv3(x3) - - x4 = self.Maxpool(x3) - x4 = self.Conv4(x4) - - x5 = self.Maxpool(x4) - x5 = self.Conv5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - x4 = self.Att5(g=d5,x=x4) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_conv5(d5) - - d4 = self.Up4(d5) - x3 = self.Att4(g=d4,x=x3) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_conv4(d4) - - d3 = self.Up3(d4) - x2 = self.Att3(g=d3,x=x2) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_conv3(d3) - - d2 = self.Up2(d3) - x1 = self.Att2(g=d2,x=x1) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_conv2(d2) - - d1 = self.Conv_1x1(d2) - - return d1 - - -class R2AttU_Net(nn.Module): - def __init__(self,img_ch=3,output_ch=1,t=2): - super().__init__() - - self.Maxpool = nn.MaxPool2d(kernel_size=2,stride=2) - self.Upsample = nn.Upsample(scale_factor=2) - - self.RRCNN1 = RRCNN_block(ch_in=img_ch,ch_out=64,t=t) - - self.RRCNN2 = RRCNN_block(ch_in=64,ch_out=128,t=t) - - self.RRCNN3 = RRCNN_block(ch_in=128,ch_out=256,t=t) - - self.RRCNN4 = RRCNN_block(ch_in=256,ch_out=512,t=t) - - self.RRCNN5 = RRCNN_block(ch_in=512,ch_out=1024,t=t) - - - self.Up5 = up_conv(ch_in=1024,ch_out=512) - self.Att5 = Attention_block(F_g=512,F_l=512,F_int=256) - self.Up_RRCNN5 = RRCNN_block(ch_in=1024, ch_out=512,t=t) - - self.Up4 = up_conv(ch_in=512,ch_out=256) - self.Att4 = Attention_block(F_g=256,F_l=256,F_int=128) - self.Up_RRCNN4 = RRCNN_block(ch_in=512, ch_out=256,t=t) - - self.Up3 = up_conv(ch_in=256,ch_out=128) - self.Att3 = Attention_block(F_g=128,F_l=128,F_int=64) - self.Up_RRCNN3 = RRCNN_block(ch_in=256, ch_out=128,t=t) - - self.Up2 = up_conv(ch_in=128,ch_out=64) - self.Att2 = Attention_block(F_g=64,F_l=64,F_int=32) - self.Up_RRCNN2 = RRCNN_block(ch_in=128, ch_out=64,t=t) - - self.Conv_1x1 = nn.Conv2d(64,output_ch,kernel_size=1,stride=1,padding=0) - - - def forward(self,x): - # encoding path - x1 = self.RRCNN1(x) - - x2 = self.Maxpool(x1) - x2 = self.RRCNN2(x2) - - x3 = self.Maxpool(x2) - x3 = self.RRCNN3(x3) - - x4 = self.Maxpool(x3) - x4 = self.RRCNN4(x4) - - x5 = self.Maxpool(x4) - x5 = self.RRCNN5(x5) - - # decoding + concat path - d5 = self.Up5(x5) - x4 = self.Att5(g=d5,x=x4) - d5 = torch.cat((x4,d5),dim=1) - d5 = self.Up_RRCNN5(d5) - - d4 = self.Up4(d5) - x3 = self.Att4(g=d4,x=x3) - d4 = torch.cat((x3,d4),dim=1) - d4 = self.Up_RRCNN4(d4) - - d3 = self.Up3(d4) - x2 = self.Att3(g=d3,x=x2) - d3 = torch.cat((x2,d3),dim=1) - d3 = self.Up_RRCNN3(d3) - - d2 = self.Up2(d3) - x1 = self.Att2(g=d2,x=x1) - d2 = torch.cat((x1,d2),dim=1) - d2 = self.Up_RRCNN2(d2) - - d1 = 
self.Conv_1x1(d2) - - return d1 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb new file mode 100644 index 00000000000..22a39b40125 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb @@ -0,0 +1,589 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "ename": "ImportError", + "evalue": "attempted relative import with no known parent package", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mnn\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mnn\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mmodels\u001b[39;00m \u001b[39mimport\u001b[39;00m SUMNet\n\u001b[1;32m 5\u001b[0m n \u001b[39m=\u001b[39m SUMNet()\n\u001b[1;32m 6\u001b[0m example_weight \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mrand(\u001b[39m1\u001b[39m, \u001b[39m1\u001b[39m, \u001b[39m3\u001b[39m, \u001b[39m3\u001b[39m)\n", + "File \u001b[0;32m~/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py:5\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mnn\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mfunctional\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mF\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtorchvision\u001b[39;00m \u001b[39mimport\u001b[39;00m models\n\u001b[0;32m----> 5\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mmax_unpool_2d\u001b[39;00m \u001b[39mimport\u001b[39;00m MaxUnpool2d\n\u001b[1;32m 7\u001b[0m \u001b[39mclass\u001b[39;00m \u001b[39mSUMNet\u001b[39;00m(nn\u001b[39m.\u001b[39mModule):\n\u001b[1;32m 8\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\u001b[39mself\u001b[39m,in_ch,out_ch):\n", + "\u001b[0;31mImportError\u001b[0m: attempted relative import with no known parent package" + ] + } + ], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from models import SUMNet\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection\n" + ] + } + ], + "source": [ + "cd .." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "from src.utils.models import SUMNet" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/deeptensor/anaconda3/envs/openv/lib/python3.9/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n", + " warnings.warn(\n", + "/home/deeptensor/anaconda3/envs/openv/lib/python3.9/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=VGG11_BN_Weights.IMAGENET1K_V1`. You can also use `weights=VGG11_BN_Weights.DEFAULT` to get the most up-to-date weights.\n", + " warnings.warn(msg)\n" + ] + } + ], + "source": [ + "n = SUMNet(in_ch=1,out_ch=2)\n", + "weights = torch.load(\"/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/downloads/model_weights/stage1/sumnet_adv_best_lungs.pt\")\n", + "n.load_state_dict(weights)\n", + "# example_weight = torch.rand(1, 1, 3, 3)\n", + "example_forward_input = torch.rand(2, 1, 64, 64)\n", + "\n", + "# Trace a specific method and construct `ScriptModule` with\n", + "# a single `forward` method\n", + "module = torch.jit.trace(n.forward, example_forward_input)\n", + "\n", + "# Trace a module (implicitly traces `forward`) and construct a\n", + "# `ScriptModule` with a single `forward` method\n", + "module = torch.jit.trace(n, example_forward_input)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "SUMNet(\n", + " original_name=SUMNet\n", + " (encoder): Sequential(\n", + " original_name=Sequential\n", + " (0): Conv2d(original_name=Conv2d)\n", + " (1): BatchNorm2d(original_name=BatchNorm2d)\n", + " (2): ReLU(original_name=ReLU)\n", + " (3): MaxPool2d(original_name=MaxPool2d)\n", + " (4): Conv2d(original_name=Conv2d)\n", + " (5): BatchNorm2d(original_name=BatchNorm2d)\n", + " (6): ReLU(original_name=ReLU)\n", + " (7): MaxPool2d(original_name=MaxPool2d)\n", + " (8): Conv2d(original_name=Conv2d)\n", + " (9): BatchNorm2d(original_name=BatchNorm2d)\n", + " (10): ReLU(original_name=ReLU)\n", + " (11): Conv2d(original_name=Conv2d)\n", + " (12): BatchNorm2d(original_name=BatchNorm2d)\n", + " (13): ReLU(original_name=ReLU)\n", + " (14): MaxPool2d(original_name=MaxPool2d)\n", + " (15): Conv2d(original_name=Conv2d)\n", + " (16): BatchNorm2d(original_name=BatchNorm2d)\n", + " (17): ReLU(original_name=ReLU)\n", + " (18): Conv2d(original_name=Conv2d)\n", + " (19): BatchNorm2d(original_name=BatchNorm2d)\n", + " (20): ReLU(original_name=ReLU)\n", + " (21): MaxPool2d(original_name=MaxPool2d)\n", + " (22): Conv2d(original_name=Conv2d)\n", + " (23): BatchNorm2d(original_name=BatchNorm2d)\n", + " (24): ReLU(original_name=ReLU)\n", + " (25): Conv2d(original_name=Conv2d)\n", + " (26): BatchNorm2d(original_name=BatchNorm2d)\n", + " (27): ReLU(original_name=ReLU)\n", + " (28): MaxPool2d(original_name=MaxPool2d)\n", + " )\n", + " (preconv): Conv2d(original_name=Conv2d)\n", + " (conv1): Conv2d(original_name=Conv2d)\n", + " (bn1): BatchNorm2d(original_name=BatchNorm2d)\n", + " (pool1): 
MaxPool2d(original_name=MaxPool2d)\n", + " (conv2): Conv2d(original_name=Conv2d)\n", + " (bn2): BatchNorm2d(original_name=BatchNorm2d)\n", + " (pool2): MaxPool2d(original_name=MaxPool2d)\n", + " (conv3a): Conv2d(original_name=Conv2d)\n", + " (bn3): BatchNorm2d(original_name=BatchNorm2d)\n", + " (conv3b): Conv2d(original_name=Conv2d)\n", + " (bn4): BatchNorm2d(original_name=BatchNorm2d)\n", + " (pool3): MaxPool2d(original_name=MaxPool2d)\n", + " (conv4a): Conv2d(original_name=Conv2d)\n", + " (bn5): BatchNorm2d(original_name=BatchNorm2d)\n", + " (conv4b): Conv2d(original_name=Conv2d)\n", + " (bn6): BatchNorm2d(original_name=BatchNorm2d)\n", + " (pool4): MaxPool2d(original_name=MaxPool2d)\n", + " (conv5a): Conv2d(original_name=Conv2d)\n", + " (bn7): BatchNorm2d(original_name=BatchNorm2d)\n", + " (conv5b): Conv2d(original_name=Conv2d)\n", + " (bn8): BatchNorm2d(original_name=BatchNorm2d)\n", + " (pool5): MaxPool2d(original_name=MaxPool2d)\n", + " (unpool5): MaxUnpool2d(original_name=MaxUnpool2d)\n", + " (donv5b): Conv2d(original_name=Conv2d)\n", + " (donv5a): Conv2d(original_name=Conv2d)\n", + " (unpool4): MaxUnpool2d(original_name=MaxUnpool2d)\n", + " (donv4b): Conv2d(original_name=Conv2d)\n", + " (donv4a): Conv2d(original_name=Conv2d)\n", + " (unpool3): MaxUnpool2d(original_name=MaxUnpool2d)\n", + " (donv3b): Conv2d(original_name=Conv2d)\n", + " (donv3a): Conv2d(original_name=Conv2d)\n", + " (unpool2): MaxUnpool2d(original_name=MaxUnpool2d)\n", + " (donv2): Conv2d(original_name=Conv2d)\n", + " (unpool1): MaxUnpool2d(original_name=MaxUnpool2d)\n", + " (donv1): Conv2d(original_name=Conv2d)\n", + " (output): Conv2d(original_name=Conv2d)\n", + ")" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "module" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "torch.onnx.export(\n", + " n,\n", + " example_forward_input,\n", + " \"AbsSummarizer.onnx\",\n", + " opset_version=11)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "graph torch_jit (\n", + " %input.1[FLOAT, 2x1x64x64]\n", + ") initializers (\n", + " %preconv.weight[FLOAT, 3x1x1x1]\n", + " %preconv.bias[FLOAT, 3]\n", + " %donv5b.weight[FLOAT, 512x1024x3x3]\n", + " %donv5b.bias[FLOAT, 512]\n", + " %donv5a.weight[FLOAT, 512x512x3x3]\n", + " %donv5a.bias[FLOAT, 512]\n", + " %donv4b.weight[FLOAT, 512x1024x3x3]\n", + " %donv4b.bias[FLOAT, 512]\n", + " %donv4a.weight[FLOAT, 256x512x3x3]\n", + " %donv4a.bias[FLOAT, 256]\n", + " %donv3b.weight[FLOAT, 256x512x3x3]\n", + " %donv3b.bias[FLOAT, 256]\n", + " %donv3a.weight[FLOAT, 128x256x3x3]\n", + " %donv3a.bias[FLOAT, 128]\n", + " %donv2.weight[FLOAT, 64x256x3x3]\n", + " %donv2.bias[FLOAT, 64]\n", + " %donv1.weight[FLOAT, 32x128x3x3]\n", + " %donv1.bias[FLOAT, 32]\n", + " %output.weight[FLOAT, 2x32x1x1]\n", + " %output.bias[FLOAT, 2]\n", + " %onnx::Conv_351[FLOAT, 64x3x3x3]\n", + " %onnx::Conv_352[FLOAT, 64]\n", + " %onnx::Conv_354[FLOAT, 128x64x3x3]\n", + " %onnx::Conv_355[FLOAT, 128]\n", + " %onnx::Conv_357[FLOAT, 256x128x3x3]\n", + " %onnx::Conv_358[FLOAT, 256]\n", + " %onnx::Conv_360[FLOAT, 256x256x3x3]\n", + " %onnx::Conv_361[FLOAT, 256]\n", + " %onnx::Conv_363[FLOAT, 512x256x3x3]\n", + " %onnx::Conv_364[FLOAT, 512]\n", + " %onnx::Conv_366[FLOAT, 512x512x3x3]\n", + " %onnx::Conv_367[FLOAT, 512]\n", + " %onnx::Conv_369[FLOAT, 512x512x3x3]\n", + " %onnx::Conv_370[FLOAT, 
512]\n", + " %onnx::Conv_372[FLOAT, 512x512x3x3]\n", + " %onnx::Conv_373[FLOAT, 512]\n", + ") {\n", + " %onnx::Relu_77 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.1, %preconv.weight, %preconv.bias)\n", + " %onnx::Conv_78 = Relu(%onnx::Relu_77)\n", + " %onnx::Relu_350 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_78, %onnx::Conv_351, %onnx::Conv_352)\n", + " %onnx::MaxPool_81 = Relu(%onnx::Relu_350)\n", + " %input.4, %onnx::Sub_83 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_81)\n", + " %84, %onnx::Slice_85 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_81)\n", + " %onnx::Slice_86 = Constant[value = ]()\n", + " %onnx::Slice_87 = Constant[value = ]()\n", + " %onnx::Slice_88 = Constant[value = ]()\n", + " %onnx::Sub_89 = Slice(%onnx::Slice_85, %onnx::Slice_87, %onnx::Slice_88, %onnx::Slice_86)\n", + " %indices = Sub(%onnx::Sub_83, %onnx::Sub_89)\n", + " %onnx::Relu_353 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.4, %onnx::Conv_354, %onnx::Conv_355)\n", + " %onnx::MaxPool_93 = Relu(%onnx::Relu_353)\n", + " %input.12, %onnx::Sub_95 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_93)\n", + " %96, %onnx::Slice_97 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_93)\n", + " %onnx::Slice_98 = Constant[value = ]()\n", + " %onnx::Slice_99 = Constant[value = ]()\n", + " %onnx::Slice_100 = Constant[value = ]()\n", + " %onnx::Sub_101 = Slice(%onnx::Slice_97, %onnx::Slice_99, %onnx::Slice_100, %onnx::Slice_98)\n", + " %indices.3 = Sub(%onnx::Sub_95, %onnx::Sub_101)\n", + " %onnx::Relu_356 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.12, %onnx::Conv_357, %onnx::Conv_358)\n", + " %onnx::Conv_105 = Relu(%onnx::Relu_356)\n", + " %onnx::Relu_359 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_105, %onnx::Conv_360, %onnx::Conv_361)\n", + " %onnx::MaxPool_108 = Relu(%onnx::Relu_359)\n", + " %input.24, %onnx::Sub_110 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_108)\n", + " %111, %onnx::Slice_112 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_108)\n", + " %onnx::Slice_113 = Constant[value = ]()\n", + " %onnx::Slice_114 = Constant[value = ]()\n", + " %onnx::Slice_115 = Constant[value = ]()\n", + " %onnx::Sub_116 = Slice(%onnx::Slice_112, %onnx::Slice_114, %onnx::Slice_115, %onnx::Slice_113)\n", + " %indices.7 = Sub(%onnx::Sub_110, %onnx::Sub_116)\n", + " %onnx::Relu_362 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.24, %onnx::Conv_363, %onnx::Conv_364)\n", + " %onnx::Conv_120 = Relu(%onnx::Relu_362)\n", + " %onnx::Relu_365 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_120, %onnx::Conv_366, %onnx::Conv_367)\n", + " %onnx::MaxPool_123 = Relu(%onnx::Relu_365)\n", + " %input.36, %onnx::Sub_125 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_123)\n", + " %126, %onnx::Slice_127 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_123)\n", + " %onnx::Slice_128 = Constant[value = ]()\n", + " 
%onnx::Slice_129 = Constant[value = ]()\n", + " %onnx::Slice_130 = Constant[value = ]()\n", + " %onnx::Sub_131 = Slice(%onnx::Slice_127, %onnx::Slice_129, %onnx::Slice_130, %onnx::Slice_128)\n", + " %indices.11 = Sub(%onnx::Sub_125, %onnx::Sub_131)\n", + " %onnx::Relu_368 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.36, %onnx::Conv_369, %onnx::Conv_370)\n", + " %onnx::Conv_135 = Relu(%onnx::Relu_368)\n", + " %onnx::Relu_371 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_135, %onnx::Conv_372, %onnx::Conv_373)\n", + " %onnx::MaxPool_138 = Relu(%onnx::Relu_371)\n", + " %input.48, %onnx::Sub_140 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_138)\n", + " %141, %onnx::Slice_142 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_138)\n", + " %onnx::Slice_143 = Constant[value = ]()\n", + " %onnx::Slice_144 = Constant[value = ]()\n", + " %onnx::Slice_145 = Constant[value = ]()\n", + " %onnx::Sub_146 = Slice(%onnx::Slice_142, %onnx::Slice_144, %onnx::Slice_145, %onnx::Slice_143)\n", + " %indices.15 = Sub(%onnx::Sub_140, %onnx::Sub_146)\n", + " %onnx::Gather_148 = Shape(%input.48)\n", + " %onnx::Gather_149 = Constant[value = ]()\n", + " %onnx::Gather_150 = Constant[value = ]()\n", + " %onnx::Range_151 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_149)\n", + " %onnx::Mul_152 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_150)\n", + " %onnx::Gather_153 = Constant[value = ]()\n", + " %onnx::Sub_154 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_153)\n", + " %onnx::Sub_155 = Constant[value = ]()\n", + " %onnx::Mul_156 = Sub(%onnx::Sub_154, %onnx::Sub_155)\n", + " %onnx::Mul_157 = Constant[value = ]()\n", + " %onnx::Add_158 = Mul(%onnx::Mul_156, %onnx::Mul_157)\n", + " %onnx::Add_159 = Constant[value = ]()\n", + " %onnx::Mul_160 = Add(%onnx::Add_158, %onnx::Add_159)\n", + " %onnx::Gather_161 = Constant[value = ]()\n", + " %onnx::Sub_162 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_161)\n", + " %onnx::Sub_163 = Constant[value = ]()\n", + " %onnx::Mul_164 = Sub(%onnx::Sub_162, %onnx::Sub_163)\n", + " %onnx::Mul_165 = Constant[value = ]()\n", + " %onnx::Add_166 = Mul(%onnx::Mul_164, %onnx::Mul_165)\n", + " %onnx::Add_167 = Constant[value = ]()\n", + " %onnx::Mul_168 = Add(%onnx::Add_166, %onnx::Add_167)\n", + " %onnx::Mul_169 = Mul(%onnx::Mul_160, %onnx::Mul_168)\n", + " %onnx::Mul_170 = Mul(%onnx::Mul_169, %onnx::Mul_152)\n", + " %onnx::Reshape_171 = Range(%onnx::Gather_149, %onnx::Mul_152, %onnx::Gather_150)\n", + " %onnx::Reshape_172 = Constant[value = ]()\n", + " %onnx::Mul_173 = Reshape(%onnx::Reshape_171, %onnx::Reshape_172)\n", + " %onnx::Cast_174 = Mul(%onnx::Mul_173, %onnx::Mul_169)\n", + " %onnx::Add_175 = Cast[to = 7](%onnx::Cast_174)\n", + " %onnx::Reshape_176 = Range(%onnx::Gather_149, %onnx::Range_151, %onnx::Gather_150)\n", + " %onnx::Reshape_177 = Constant[value = ]()\n", + " %onnx::Mul_178 = Reshape(%onnx::Reshape_176, %onnx::Reshape_177)\n", + " %onnx::Cast_179 = Mul(%onnx::Mul_178, %onnx::Mul_170)\n", + " %onnx::Add_180 = Cast[to = 7](%onnx::Cast_179)\n", + " %onnx::Add_181 = Add(%indices.15, %onnx::Add_175)\n", + " %onnx::MaxUnpool_182 = Add(%onnx::Add_181, %onnx::Add_180)\n", + " %onnx::Concat_183 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%input.48, %onnx::MaxUnpool_182)\n", + " %input.52 = Concat[axis = 1](%onnx::Concat_183, %onnx::MaxPool_138)\n", + " 
%onnx::Relu_185 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.52, %donv5b.weight, %donv5b.bias)\n", + " %onnx::Conv_186 = Relu(%onnx::Relu_185)\n", + " %onnx::Relu_187 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_186, %donv5a.weight, %donv5a.bias)\n", + " %onnx::Shape_188 = Relu(%onnx::Relu_187)\n", + " %onnx::Gather_189 = Shape(%onnx::Shape_188)\n", + " %onnx::Gather_190 = Constant[value = ]()\n", + " %onnx::Gather_191 = Constant[value = ]()\n", + " %onnx::Range_192 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_190)\n", + " %onnx::Mul_193 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_191)\n", + " %onnx::Gather_194 = Constant[value = ]()\n", + " %onnx::Sub_195 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_194)\n", + " %onnx::Sub_196 = Constant[value = ]()\n", + " %onnx::Mul_197 = Sub(%onnx::Sub_195, %onnx::Sub_196)\n", + " %onnx::Mul_198 = Constant[value = ]()\n", + " %onnx::Add_199 = Mul(%onnx::Mul_197, %onnx::Mul_198)\n", + " %onnx::Add_200 = Constant[value = ]()\n", + " %onnx::Mul_201 = Add(%onnx::Add_199, %onnx::Add_200)\n", + " %onnx::Gather_202 = Constant[value = ]()\n", + " %onnx::Sub_203 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_202)\n", + " %onnx::Sub_204 = Constant[value = ]()\n", + " %onnx::Mul_205 = Sub(%onnx::Sub_203, %onnx::Sub_204)\n", + " %onnx::Mul_206 = Constant[value = ]()\n", + " %onnx::Add_207 = Mul(%onnx::Mul_205, %onnx::Mul_206)\n", + " %onnx::Add_208 = Constant[value = ]()\n", + " %onnx::Mul_209 = Add(%onnx::Add_207, %onnx::Add_208)\n", + " %onnx::Mul_210 = Mul(%onnx::Mul_201, %onnx::Mul_209)\n", + " %onnx::Mul_211 = Mul(%onnx::Mul_210, %onnx::Mul_193)\n", + " %onnx::Reshape_212 = Range(%onnx::Gather_190, %onnx::Mul_193, %onnx::Gather_191)\n", + " %onnx::Reshape_213 = Constant[value = ]()\n", + " %onnx::Mul_214 = Reshape(%onnx::Reshape_212, %onnx::Reshape_213)\n", + " %onnx::Cast_215 = Mul(%onnx::Mul_214, %onnx::Mul_210)\n", + " %onnx::Add_216 = Cast[to = 7](%onnx::Cast_215)\n", + " %onnx::Reshape_217 = Range(%onnx::Gather_190, %onnx::Range_192, %onnx::Gather_191)\n", + " %onnx::Reshape_218 = Constant[value = ]()\n", + " %onnx::Mul_219 = Reshape(%onnx::Reshape_217, %onnx::Reshape_218)\n", + " %onnx::Cast_220 = Mul(%onnx::Mul_219, %onnx::Mul_211)\n", + " %onnx::Add_221 = Cast[to = 7](%onnx::Cast_220)\n", + " %onnx::Add_222 = Add(%indices.11, %onnx::Add_216)\n", + " %onnx::MaxUnpool_223 = Add(%onnx::Add_222, %onnx::Add_221)\n", + " %onnx::Concat_224 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_188, %onnx::MaxUnpool_223)\n", + " %input.56 = Concat[axis = 1](%onnx::Concat_224, %onnx::MaxPool_123)\n", + " %onnx::Relu_226 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.56, %donv4b.weight, %donv4b.bias)\n", + " %onnx::Conv_227 = Relu(%onnx::Relu_226)\n", + " %onnx::Relu_228 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_227, %donv4a.weight, %donv4a.bias)\n", + " %onnx::Shape_229 = Relu(%onnx::Relu_228)\n", + " %onnx::Gather_230 = Shape(%onnx::Shape_229)\n", + " %onnx::Gather_231 = Constant[value = ]()\n", + " %onnx::Gather_232 = Constant[value = ]()\n", + " %onnx::Range_233 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_231)\n", + " %onnx::Mul_234 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_232)\n", + " %onnx::Gather_235 = Constant[value = ]()\n", + " 
%onnx::Sub_236 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_235)\n", + " %onnx::Sub_237 = Constant[value = ]()\n", + " %onnx::Mul_238 = Sub(%onnx::Sub_236, %onnx::Sub_237)\n", + " %onnx::Mul_239 = Constant[value = ]()\n", + " %onnx::Add_240 = Mul(%onnx::Mul_238, %onnx::Mul_239)\n", + " %onnx::Add_241 = Constant[value = ]()\n", + " %onnx::Mul_242 = Add(%onnx::Add_240, %onnx::Add_241)\n", + " %onnx::Gather_243 = Constant[value = ]()\n", + " %onnx::Sub_244 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_243)\n", + " %onnx::Sub_245 = Constant[value = ]()\n", + " %onnx::Mul_246 = Sub(%onnx::Sub_244, %onnx::Sub_245)\n", + " %onnx::Mul_247 = Constant[value = ]()\n", + " %onnx::Add_248 = Mul(%onnx::Mul_246, %onnx::Mul_247)\n", + " %onnx::Add_249 = Constant[value = ]()\n", + " %onnx::Mul_250 = Add(%onnx::Add_248, %onnx::Add_249)\n", + " %onnx::Mul_251 = Mul(%onnx::Mul_242, %onnx::Mul_250)\n", + " %onnx::Mul_252 = Mul(%onnx::Mul_251, %onnx::Mul_234)\n", + " %onnx::Reshape_253 = Range(%onnx::Gather_231, %onnx::Mul_234, %onnx::Gather_232)\n", + " %onnx::Reshape_254 = Constant[value = ]()\n", + " %onnx::Mul_255 = Reshape(%onnx::Reshape_253, %onnx::Reshape_254)\n", + " %onnx::Cast_256 = Mul(%onnx::Mul_255, %onnx::Mul_251)\n", + " %onnx::Add_257 = Cast[to = 7](%onnx::Cast_256)\n", + " %onnx::Reshape_258 = Range(%onnx::Gather_231, %onnx::Range_233, %onnx::Gather_232)\n", + " %onnx::Reshape_259 = Constant[value = ]()\n", + " %onnx::Mul_260 = Reshape(%onnx::Reshape_258, %onnx::Reshape_259)\n", + " %onnx::Cast_261 = Mul(%onnx::Mul_260, %onnx::Mul_252)\n", + " %onnx::Add_262 = Cast[to = 7](%onnx::Cast_261)\n", + " %onnx::Add_263 = Add(%indices.7, %onnx::Add_257)\n", + " %onnx::MaxUnpool_264 = Add(%onnx::Add_263, %onnx::Add_262)\n", + " %onnx::Concat_265 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_229, %onnx::MaxUnpool_264)\n", + " %input.60 = Concat[axis = 1](%onnx::Concat_265, %onnx::MaxPool_108)\n", + " %onnx::Relu_267 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.60, %donv3b.weight, %donv3b.bias)\n", + " %onnx::Conv_268 = Relu(%onnx::Relu_267)\n", + " %onnx::Relu_269 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_268, %donv3a.weight, %donv3a.bias)\n", + " %input.64 = Relu(%onnx::Relu_269)\n", + " %onnx::Gather_271 = Shape(%input.64)\n", + " %onnx::Gather_272 = Constant[value = ]()\n", + " %onnx::Gather_273 = Constant[value = ]()\n", + " %onnx::Range_274 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_272)\n", + " %onnx::Mul_275 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_273)\n", + " %onnx::Gather_276 = Constant[value = ]()\n", + " %onnx::Sub_277 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_276)\n", + " %onnx::Sub_278 = Constant[value = ]()\n", + " %onnx::Mul_279 = Sub(%onnx::Sub_277, %onnx::Sub_278)\n", + " %onnx::Mul_280 = Constant[value = ]()\n", + " %onnx::Add_281 = Mul(%onnx::Mul_279, %onnx::Mul_280)\n", + " %onnx::Add_282 = Constant[value = ]()\n", + " %onnx::Mul_283 = Add(%onnx::Add_281, %onnx::Add_282)\n", + " %onnx::Gather_284 = Constant[value = ]()\n", + " %onnx::Sub_285 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_284)\n", + " %onnx::Sub_286 = Constant[value = ]()\n", + " %onnx::Mul_287 = Sub(%onnx::Sub_285, %onnx::Sub_286)\n", + " %onnx::Mul_288 = Constant[value = ]()\n", + " %onnx::Add_289 = Mul(%onnx::Mul_287, %onnx::Mul_288)\n", + " %onnx::Add_290 = Constant[value = ]()\n", + " %onnx::Mul_291 = 
Add(%onnx::Add_289, %onnx::Add_290)\n", + " %onnx::Mul_292 = Mul(%onnx::Mul_283, %onnx::Mul_291)\n", + " %onnx::Mul_293 = Mul(%onnx::Mul_292, %onnx::Mul_275)\n", + " %onnx::Reshape_294 = Range(%onnx::Gather_272, %onnx::Mul_275, %onnx::Gather_273)\n", + " %onnx::Reshape_295 = Constant[value = ]()\n", + " %onnx::Mul_296 = Reshape(%onnx::Reshape_294, %onnx::Reshape_295)\n", + " %onnx::Cast_297 = Mul(%onnx::Mul_296, %onnx::Mul_292)\n", + " %onnx::Add_298 = Cast[to = 7](%onnx::Cast_297)\n", + " %onnx::Reshape_299 = Range(%onnx::Gather_272, %onnx::Range_274, %onnx::Gather_273)\n", + " %onnx::Reshape_300 = Constant[value = ]()\n", + " %onnx::Mul_301 = Reshape(%onnx::Reshape_299, %onnx::Reshape_300)\n", + " %onnx::Cast_302 = Mul(%onnx::Mul_301, %onnx::Mul_293)\n", + " %onnx::Add_303 = Cast[to = 7](%onnx::Cast_302)\n", + " %onnx::Add_304 = Add(%indices.3, %onnx::Add_298)\n", + " %onnx::MaxUnpool_305 = Add(%onnx::Add_304, %onnx::Add_303)\n", + " %onnx::Concat_306 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%input.64, %onnx::MaxUnpool_305)\n", + " %input.68 = Concat[axis = 1](%onnx::Concat_306, %onnx::MaxPool_93)\n", + " %onnx::Relu_308 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.68, %donv2.weight, %donv2.bias)\n", + " %onnx::Shape_309 = Relu(%onnx::Relu_308)\n", + " %onnx::Gather_310 = Shape(%onnx::Shape_309)\n", + " %onnx::Gather_311 = Constant[value = ]()\n", + " %onnx::Gather_312 = Constant[value = ]()\n", + " %onnx::Range_313 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_311)\n", + " %onnx::Mul_314 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_312)\n", + " %onnx::Gather_315 = Constant[value = ]()\n", + " %onnx::Sub_316 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_315)\n", + " %onnx::Sub_317 = Constant[value = ]()\n", + " %onnx::Mul_318 = Sub(%onnx::Sub_316, %onnx::Sub_317)\n", + " %onnx::Mul_319 = Constant[value = ]()\n", + " %onnx::Add_320 = Mul(%onnx::Mul_318, %onnx::Mul_319)\n", + " %onnx::Add_321 = Constant[value = ]()\n", + " %onnx::Mul_322 = Add(%onnx::Add_320, %onnx::Add_321)\n", + " %onnx::Gather_323 = Constant[value = ]()\n", + " %onnx::Sub_324 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_323)\n", + " %onnx::Sub_325 = Constant[value = ]()\n", + " %onnx::Mul_326 = Sub(%onnx::Sub_324, %onnx::Sub_325)\n", + " %onnx::Mul_327 = Constant[value = ]()\n", + " %onnx::Add_328 = Mul(%onnx::Mul_326, %onnx::Mul_327)\n", + " %onnx::Add_329 = Constant[value = ]()\n", + " %onnx::Mul_330 = Add(%onnx::Add_328, %onnx::Add_329)\n", + " %onnx::Mul_331 = Mul(%onnx::Mul_322, %onnx::Mul_330)\n", + " %onnx::Mul_332 = Mul(%onnx::Mul_331, %onnx::Mul_314)\n", + " %onnx::Reshape_333 = Range(%onnx::Gather_311, %onnx::Mul_314, %onnx::Gather_312)\n", + " %onnx::Reshape_334 = Constant[value = ]()\n", + " %onnx::Mul_335 = Reshape(%onnx::Reshape_333, %onnx::Reshape_334)\n", + " %onnx::Cast_336 = Mul(%onnx::Mul_335, %onnx::Mul_331)\n", + " %onnx::Add_337 = Cast[to = 7](%onnx::Cast_336)\n", + " %onnx::Reshape_338 = Range(%onnx::Gather_311, %onnx::Range_313, %onnx::Gather_312)\n", + " %onnx::Reshape_339 = Constant[value = ]()\n", + " %onnx::Mul_340 = Reshape(%onnx::Reshape_338, %onnx::Reshape_339)\n", + " %onnx::Cast_341 = Mul(%onnx::Mul_340, %onnx::Mul_332)\n", + " %onnx::Add_342 = Cast[to = 7](%onnx::Cast_341)\n", + " %onnx::Add_343 = Add(%indices, %onnx::Add_337)\n", + " %onnx::MaxUnpool_344 = Add(%onnx::Add_343, %onnx::Add_342)\n", + " %onnx::Concat_345 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_309, 
%onnx::MaxUnpool_344)\n", + " %input.72 = Concat[axis = 1](%onnx::Concat_345, %onnx::MaxPool_81)\n", + " %onnx::Relu_347 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.72, %donv1.weight, %donv1.bias)\n", + " %onnx::Conv_348 = Relu(%onnx::Relu_347)\n", + " %349 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%onnx::Conv_348, %output.weight, %output.bias)\n", + " return %349\n", + "}\n" + ] + } + ], + "source": [ + "import onnx\n", + "\n", + "# Load the ONNX model\n", + "model = onnx.load(\"AbsSummarizer.onnx\")\n", + "\n", + "# Check that the model is well formed\n", + "onnx.checker.check_model(model)\n", + "\n", + "# Print a human readable representation of the graph\n", + "print(onnx.helper.printable_graph(model.graph))" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'1.12.1'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "torch.__version__\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "module 'openvino' has no attribute '__version__'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[20], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mopenvino\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m openvino\u001b[39m.\u001b[39;49m__version__\n", + "\u001b[0;31mAttributeError\u001b[0m: module 'openvino' has no attribute '__version__'" + ] + } + ], + "source": [ + "import openvino\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "openv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.15" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "0d646c53ccd8c179444f222988100eafa4a93600d3e2b5bf836e9c9f08adbeba" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py deleted file mode 100644 index bbcee4d0805..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/sumnet_bn_vgg.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from torchvision import models -from .max_unpool_2d import MaxUnpool2d - -class SUMNet(nn.Module): - def __init__(self,in_ch,out_ch): - super().__init__() - - self.encoder = models.vgg11_bn(pretrained = True).features - self.preconv = nn.Conv2d(in_ch, 3, 1) - self.conv1 = self.encoder[0] - self.bn1 = self.encoder[1] - self.pool1 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv2 = self.encoder[4] - self.bn2 = self.encoder[5] - self.pool2 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv3a = self.encoder[8] - self.bn3 = self.encoder[9] - self.conv3b = self.encoder[11] - self.bn4 = 
self.encoder[12] - self.pool3 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv4a = self.encoder[15] - self.bn5 = self.encoder[16] - self.conv4b = self.encoder[18] - self.bn6 = self.encoder[19] - self.pool4 = nn.MaxPool2d(2, 2, return_indices = True) - self.conv5a = self.encoder[22] - self.bn7 = self.encoder[23] - self.conv5b = self.encoder[25] - self.bn8 = self.encoder[26] - self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) - - self.unpool5 = MaxUnpool2d(2, 2) - self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) - self.unpool4 = MaxUnpool2d(2, 2) - self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) - self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) - self.unpool3 = MaxUnpool2d(2, 2) - self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) - self.donv3a = nn.Conv2d(256,128, 3, padding = 1) - self.unpool2 = MaxUnpool2d(2, 2) - self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) - self.unpool1 = MaxUnpool2d(2, 2) - self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) - self.output = nn.Conv2d(32, out_ch, 1) - - def forward(self, x): - preconv = F.relu(self.preconv(x), inplace = True) - conv1 = F.relu(self.bn1(self.conv1(preconv)), inplace = True) - pool1, idxs1 = self.pool1(conv1) - conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True) - pool2, idxs2 = self.pool2(conv2) - conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True) - conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True) - pool3, idxs3 = self.pool3(conv3b) - conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True) - conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True) - pool4, idxs4 = self.pool4(conv4b) - conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True) - conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) - pool5, idxs5 = self.pool5(conv5b) - - unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1) - donv5b = F.relu(self.donv5b(unpool5), inplace = True) - donv5a = F.relu(self.donv5a(donv5b), inplace = True) - unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) - donv4b = F.relu(self.donv4b(unpool4), inplace = True) - donv4a = F.relu(self.donv4a(donv4b), inplace = True) - unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) - donv3b = F.relu(self.donv3b(unpool3), inplace = True) - donv3a = F.relu(self.donv3a(donv3b)) - unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) - donv2 = F.relu(self.donv2(unpool2), inplace = True) - unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) - donv1 = F.relu(self.donv1(unpool1), inplace = True) - output = self.output(donv1) - return output diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py similarity index 97% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py index 26e263866fa..88d63fb37f4 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/lung_seg.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py @@ -1,21 +1,18 @@ -import numpy as np import torch import torch.nn as nn from torch import optim -from tqdm import tqdm as tq -import time from torch.utils import data -import os import torch.nn.functional as F from torch.autograd import Variable +import os +from tqdm import tqdm as tq +import time import matplotlib.pyplot as plt -from .sumnet_bn_vgg import SUMNet -from .r2unet import R2U_Net -from .r2unet 
import U_Net +from .models import SUMNet, U_Net, R2U_Net, Discriminator import json from .data_loader import LungDataLoader -from .utils import dice_coefficient, plot_graphs -from .discriminator import Discriminator, ch_shuffle +from .utils import dice_coefficient, plot_graphs, ch_shuffle + plt.switch_backend('agg') def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4,adv=False): @@ -221,3 +218,4 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc plot_graphs(train_values=trainDiceCoeff_lungs, valid_values=validDiceCoeff_lungs, save_path=save_path, x_label='Epochs', y_label='Dice coefficient', plot_title='Dice coefficient', save_name='Dice_Plot.png') + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py similarity index 98% rename from misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py rename to misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py index a050569492a..f795a6bde97 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/patch_classifier.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py @@ -4,12 +4,11 @@ from torch.optim import lr_scheduler from torch.utils import data from torch.autograd import Variable -import matplotlib.pyplot as plt import time import os -from tqdm import tqdm_notebook as tq +from tqdm import tqdm as tq from .data_loader import LungPatchDataLoader -from .lenet import LeNet +from .models import LeNet from .utils import plot_graphs diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index d284e92376c..b05b137f4e3 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -1,12 +1,18 @@ -import torch -import matplotlib.pyplot as plt import os -from .sumnet_bn_vgg import SUMNet -from .r2unet import U_Net, R2U_Net -from .lenet import LeNet - +import matplotlib.pyplot as plt +import numpy as np +import torch +from .models import LeNet, R2U_Net, SUMNet, U_Net +def ch_shuffle(x): + shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) + shuffIdx2 = 1-shuffIdx1 + d_in = torch.Tensor(x.size()).cuda() + d_in[:,shuffIdx1] = x[:,0] + d_in[:,shuffIdx2] = x[:,1] + shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) + return d_in, shuffLabel def dice_coefficient(pred1, target): smooth = 1e-15 @@ -44,6 +50,12 @@ def load_model(network): net = LeNet() return net +def load_checkpoint(model, checkpoint): + if checkpoint is not None: + model_checkpoint = torch.load(checkpoint) + model.load_state_dict(model_checkpoint) + else: + model.state_dict() def plot_graphs( train_values, valid_values, From a67acf036f425383dd516f4ec4a92dcab7270c33 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Thu, 12 Jan 2023 02:51:26 +0530 Subject: [PATCH 06/47] fixed IR conversion issue, unit test for train and export added --- .../configs/stage1_config.json | 4 +- .../configs/stage2_config.json | 8 +- .../lung_nodule_detection/requirements.txt | 11 +- .../lung_nodule_detection/src/scrap.py | 5 - .../src/train_network.py | 28 +- .../src/utils/data_loader.py | 25 +- .../src/utils/exporter.py | 4 +- .../src/utils/max_unpool_2d.py | 146 ----- .../lung_nodule_detection/src/utils/models.py | 22 +- .../.github/FUNDING.yml | 13 - 
.../.github/workflows/main.yml | 208 ------- .../src/utils/openvino_pytorch_layers/LICENSE | 201 ------ .../utils/openvino_pytorch_layers/README.md | 41 -- .../utils/openvino_pytorch_layers/__init__.py | 0 .../utils/openvino_pytorch_layers/compare.py | 39 -- .../examples/calculate_grid/calculate_grid.py | 24 - .../examples/calculate_grid/export_model.py | 40 -- .../examples/complex_mul/complex_mul.py | 22 - .../examples/complex_mul/export_model.py | 43 -- .../deformable_conv/deformable_conv.py | 51 -- .../examples/deformable_conv/export_model.py | 111 ---- .../examples/fft/export_model.py | 45 -- .../examples/fft/fft.py | 73 --- .../examples/grid_sample/export_model.py | 50 -- .../examples/grid_sample/grid_sample.py | 12 - .../examples/sparse_conv/export_model.py | 69 -- .../examples/sparse_conv/sparse_conv.py | 55 -- .../examples/unpool/README.md | 47 -- .../examples/unpool/export_model.py | 56 -- .../examples/unpool/unpool.py | 17 - .../mo_extensions/front/onnx/max_unpool.py | 46 -- .../mo_extensions/ops/MaxPoolGrad.py | 24 - .../openvino_extensions/__init__.py | 23 - .../utils/openvino_pytorch_layers/setup.py | 21 - .../tests/requirements.txt | 5 - .../tests/run_tests.py | 147 ----- .../user_ie_extensions/CMakeLists.txt | 27 - .../user_ie_extensions/calculate_grid.cpp | 79 --- .../user_ie_extensions/calculate_grid.hpp | 29 - .../user_ie_extensions/complex_mul.cpp | 89 --- .../user_ie_extensions/complex_mul.hpp | 30 - .../user_ie_extensions/fft.cpp | 372 ----------- .../user_ie_extensions/fft.hpp | 36 -- .../user_ie_extensions/grid_sample.cpp | 125 ---- .../user_ie_extensions/grid_sample.hpp | 30 - .../user_ie_extensions/ov_extension.cpp | 38 -- .../user_ie_extensions/sparse_conv.cpp | 109 ---- .../user_ie_extensions/sparse_conv.hpp | 33 - .../sparse_conv_transpose.cpp | 109 ---- .../sparse_conv_transpose.hpp | 33 - .../user_ie_extensions/unpool.cpp | 86 --- .../user_ie_extensions/unpool.hpp | 41 -- .../src/utils/scrap.ipynb | 589 ------------------ .../src/utils/train_stage1.py | 26 +- .../src/utils/train_stage2.py | 17 +- .../lung_nodule_detection/src/utils/utils.py | 21 +- .../tests/test_export.py | 103 +++ .../lung_nodule_detection/tests/test_train.py | 65 ++ 58 files changed, 261 insertions(+), 3562 deletions(-) delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py delete mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp delete mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp delete mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json index 91c51e64bc9..6983532195f 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json @@ -3,7 +3,7 @@ "save_path": "temp_data/stage1/", "json_path": "downloads/test_data/fold4_pos_neg_eq.json", "datapath": "downloads/test_data/stage1/", - "lung_segpath": "temp_data/stage1/", + "lung_segpath": "downloads/test_data/stage1/mask/", "network": "sumnet", "epochs": 5, "lrate": 1e-4, @@ -17,7 +17,7 @@ }, "export":{ "checkpoint": "downloads/model_weights/stage1/sumnet_adv_best_lungs.pt", - "input_shape":[2, 1, 512, 512], + "input_shape":[1, 1, 512, 512], "model_name_onnx": "lung_seg.onnx", "model_name":"lung_seg", "network": "sumnet" diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json index 3f4e05aef0f..180f05b1247 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json @@ -1,13 +1,15 @@ {"train":{ "savepath": "temp_data/stage2/", - "imgpath": "test_data/stage2/", + "imgpath": "downloads/test_data/stage2/", + "jsonpath": "downloads/test_data/patch_data_split.json", "lrate": 1e-4, "epochs": 5, "network": "lenet" }, "inference":{ "modelpath":"temp_data/stage2/", - "imgpath":"test_data/stage2/", + "imgpath":"downloads/test_data/stage2/", + "jsonpath": "downloads/test_data/patch_data_split.json", "network": "lenet" }, "export":{ @@ -17,4 +19,4 @@ "model_name":"lenet_best", "network": "lenet" } -} \ No newline at end of file +} diff --git a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt index ea55fe6e700..f45bd492f42 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt +++ b/misc/pytorch_toolkit/lung_nodule_detection/requirements.txt @@ -1,9 +1,8 @@ -torch==1.5.1 -torchvision==0.6.1 -numpy -openvino-dev[onnx] -onnxruntime -onnx +torch==1.12.0 +torchvision==0.13.0 +numpy==1.19.5 +openvino-dev[onnx]==2022.1.0 +onnxruntime==1.10.0 wget tqdm pytest diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py b/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py deleted file mode 100644 index 79f315d6e10..00000000000 --- 
a/misc/pytorch_toolkit/lung_nodule_detection/src/scrap.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch -print(torch.__version__) - -import torchvision -print(torchvision.__version__) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py index 588a2ea1eae..e2b7fdc53e0 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/train_network.py @@ -1,35 +1,13 @@ from utils import train_stage1 -from utils import patch_classifier +from utils import train_stage2 import argparse def main(config): if config["lungseg"] or config["lungsegadv"]: - foldno = config["foldno"] - savepath = config["savepath"] - jsonpath = config["jsonpath"] - datapath = config["datapath"] - lungsegpath = config["lungmask"] - network = config["network"] - if config["epochs"]: - if config["lungsegadv"]: - train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"],adv=True) - else: - train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"]) - else: - if config["lungsegadv"]: - train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network,config["epochs"],adv=True) - else: - train_stage1.train_network(foldno,savepath,jsonpath,datapath,lungsegpath,network) - + train_stage1.train_network(config) else: - savepath = config["savepath"] - imgpath = config["datapath"] - if config["epochs"]: - patch_classifier.lungpatch_classifier(savepath,imgpath,config["epochs"]) - else: - patch_classifier.lungpatch_classifier(savepath,imgpath) - + train_stage2.lungpatch_classifier(config) if __name__ == '__main__': diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index 27f5c194390..6291f8f764d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -49,8 +49,8 @@ def __len__(self): def __getitem__(self,index): filename = self.files[index] - img = Image.fromarray(np.load(self.path+'image/'+filename).astype(float)) - lung_mask = Image.fromarray(np.load(self.lung_path+filename).astype(float)) + img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) + lung_mask = Image.fromarray(np.load(self.path+'mask/'+filename).astype(float)) if self.is_transform: img, lung_mask = self.transform(img,lung_mask) @@ -69,31 +69,38 @@ def transform(self,img,lung_mask): class LungPatchDataLoader(data.Dataset): - def __init__(self,imgpath,split="train_set",is_transform= True): + def __init__(self,imgpath,json_file,split="train_set",is_transform= True): self.split = split - self.imgpath = imgpath+self.split+'/img/' + self.json = json_file + self.files = self.json[self.split] self.is_transform = is_transform - self.files = os.listdir(self.imgpath) - + self.imgpath = imgpath + def __len__(self): return len(self.files) def __getitem__(self,index): filename = self.files[index] - l1 = int(filename.split('_')[1]) + # For actual training with full dataset, uncomment below + # l1 = int(filename.split('_')[1]) + # For testing with dummy data + split_1 = filename.split('_')[1] + l1 = int(float(split_1.split('.')[0])) if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 l2 = 0 else: l2 = 1 label = torch.tensor([l1,l2]) - img = np.load(self.imgpath+filename) 
+ # For data prepared using dataprep script, uncomment below line + # img = np.load(os.path.join(self.imgpath,filename)) + img = np.load(os.path.join(self.imgpath,'img',filename)) if self.is_transform: img= self.transform(img) - return img,label + return img, label def transform(self,img): img = torch.Tensor(img).unsqueeze(0) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py index 3e7c429d525..4e6d1a788fe 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py @@ -23,7 +23,7 @@ def export_model_ir(self): --input_model {input_model} \ --input_shape "{input_shape}" \ --output_dir {output_dir}\ - --log_level=DEBUG\ + --log_level=ERROR\ --extension {openvino_extension_path}""" if self.config.get('verbose_export'): @@ -42,5 +42,5 @@ def export_model_onnx(self): torch.onnx.export(self.model, dummy_input, res_path, input_names=['input'], output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK, + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH, verbose=False) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py deleted file mode 100644 index 96a759bad93..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/max_unpool_2d.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -Modified from Pytorch `MaxUnpool2d` module to support ONNX conversion. - -Source: https://github.com/pytorch/pytorch/issues/25088#issuecomment-1090956803 -""" - -import torch -import torch.nn.functional as F -from torch.autograd import Function -from torch.nn.modules.pooling import _MaxUnpoolNd -from torch.nn.modules.utils import _pair -from torch import nn - -class MaxUnpool2dop(Function): - """We warp the `torch.nn.functional.max_unpool2d` - with an extra `symbolic` method, which is needed while exporting to ONNX. - Users should not call this function directly. - """ - - @staticmethod - def forward(ctx, input, indices, kernel_size, stride, padding, - output_size): - """Forward function of MaxUnpool2dop. - Args: - input (Tensor): Tensor needed to upsample. - indices (Tensor): Indices output of the previous MaxPool. - kernel_size (Tuple): Size of the max pooling window. - stride (Tuple): Stride of the max pooling window. - padding (Tuple): Padding that was added to the input. - output_size (List or Tuple): The shape of output tensor. - Returns: - Tensor: Output tensor. 
- """ - return F.max_unpool2d(input, indices, kernel_size, stride, padding, - output_size) - - @staticmethod - def symbolic(g, input, indices, kernel_size, stride, padding, output_size): - # get shape - input_shape = g.op('Shape', input) - const_0 = g.op('Constant', value_t=torch.tensor(0)) - const_1 = g.op('Constant', value_t=torch.tensor(1)) - batch_size = g.op('Gather', input_shape, const_0, axis_i=0) - channel = g.op('Gather', input_shape, const_1, axis_i=0) - - # height = (height - 1) * stride + kernel_size - height = g.op( - 'Gather', - input_shape, - g.op('Constant', value_t=torch.tensor(2)), - axis_i=0) - height = g.op('Sub', height, const_1) - height = g.op('Mul', height, - g.op('Constant', value_t=torch.tensor(stride[1]))) - height = g.op('Add', height, - g.op('Constant', value_t=torch.tensor(kernel_size[1]))) - - # width = (width - 1) * stride + kernel_size - width = g.op( - 'Gather', - input_shape, - g.op('Constant', value_t=torch.tensor(3)), - axis_i=0) - width = g.op('Sub', width, const_1) - width = g.op('Mul', width, - g.op('Constant', value_t=torch.tensor(stride[0]))) - width = g.op('Add', width, - g.op('Constant', value_t=torch.tensor(kernel_size[0]))) - - # step of channel - channel_step = g.op('Mul', height, width) - # step of batch - batch_step = g.op('Mul', channel_step, channel) - - # channel offset - range_channel = g.op('Range', const_0, channel, const_1) - range_channel = g.op( - 'Reshape', range_channel, - g.op('Constant', value_t=torch.tensor([1, -1, 1, 1]))) - range_channel = g.op('Mul', range_channel, channel_step) - range_channel = g.op('Cast', range_channel, to_i=7) # 7 is int64 - - # batch offset - range_batch = g.op('Range', const_0, batch_size, const_1) - range_batch = g.op( - 'Reshape', range_batch, - g.op('Constant', value_t=torch.tensor([-1, 1, 1, 1]))) - range_batch = g.op('Mul', range_batch, batch_step) - range_batch = g.op('Cast', range_batch, to_i=7) # 7 is int64 - - # update indices - indices = g.op('Add', indices, range_channel) - indices = g.op('Add', indices, range_batch) - - return g.op( - 'MaxUnpool', - input, - indices, - kernel_shape_i=kernel_size, - strides_i=stride) - - -class MaxUnpool2d(_MaxUnpoolNd): - """This module is modified from Pytorch `MaxUnpool2d` module. - Args: - kernel_size (int or tuple): Size of the max pooling window. - stride (int or tuple): Stride of the max pooling window. - Default: None (It is set to `kernel_size` by default). - padding (int or tuple): Padding that is added to the input. - Default: 0. - """ - - def __init__(self, kernel_size, stride=None, padding=0): - super(MaxUnpool2d, self).__init__() - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride or kernel_size) - self.padding = _pair(padding) - - def forward(self, input, indices, output_size=None): - """Forward function of MaxUnpool2d. - Args: - input (Tensor): Tensor needed to upsample. - indices (Tensor): Indices output of the previous MaxPool. - output_size (List or Tuple): The shape of output tensor. - Default: None. - Returns: - Tensor: Output tensor. 
- """ - return MaxUnpool2dop.apply(input, indices, self.kernel_size, - self.stride, self.padding, output_size) - - -class Unpool2d(torch.autograd.Function): - @staticmethod - def symbolic(g, x, indices, output_size=None): - if output_size: - return g.op('Unpooling', x, indices, output_size) - else: - return g.op('Unpooling', x, indices) - - @staticmethod - def forward(self, x, indices, output_size=None): - if not output_size is None: - return nn.MaxUnpool2d(2, stride=2)(x, indices, output_size=output_size.size()) - else: - return nn.MaxUnpool2d(2, stride=2)(x, indices) \ No newline at end of file diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py index 81ad49dc2f5..21c019c6b9e 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py @@ -2,8 +2,6 @@ from torch import nn import torch.nn.functional as F from torchvision import models -from .max_unpool_2d import Unpool2d as MaxUnpool2d - class SUMNet(nn.Module): def __init__(self,in_ch,out_ch): @@ -33,18 +31,18 @@ def __init__(self,in_ch,out_ch): self.bn8 = self.encoder[26] self.pool5 = nn.MaxPool2d(2, 2, return_indices = True) - self.unpool5 = MaxUnpool2d() + self.unpool5 = nn.MaxUnpool2d(2, 2) self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1) self.donv5a = nn.Conv2d(512, 512, 3, padding = 1) - self.unpool4 = MaxUnpool2d() + self.unpool4 = nn.MaxUnpool2d(2, 2) self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1) self.donv4a = nn.Conv2d(512, 256, 3, padding = 1) - self.unpool3 = MaxUnpool2d() + self.unpool3 = nn.MaxUnpool2d(2, 2) self.donv3b = nn.Conv2d(512, 256, 3, padding = 1) self.donv3a = nn.Conv2d(256,128, 3, padding = 1) - self.unpool2 = MaxUnpool2d() + self.unpool2 = nn.MaxUnpool2d(2, 2) self.donv2 = nn.Conv2d(256, 64, 3, padding = 1) - self.unpool1 = MaxUnpool2d() + self.unpool1 = nn.MaxUnpool2d(2, 2) self.donv1 = nn.Conv2d(128, 32, 3, padding = 1) self.output = nn.Conv2d(32, out_ch, 1) @@ -64,18 +62,18 @@ def forward(self, x): conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True) pool5, idxs5 = self.pool5(conv5b) - unpool5 = torch.cat([self.unpool5.apply(pool5, idxs5), conv5b], 1) + unpool5 = torch.cat([self.unpool5(pool5 + 1e-10, idxs5), conv5b], 1) donv5b = F.relu(self.donv5b(unpool5), inplace = True) donv5a = F.relu(self.donv5a(donv5b), inplace = True) - unpool4 = torch.cat([self.unpool4.apply(donv5a, idxs4), conv4b], 1) + unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1) donv4b = F.relu(self.donv4b(unpool4), inplace = True) donv4a = F.relu(self.donv4a(donv4b), inplace = True) - unpool3 = torch.cat([self.unpool3.apply(donv4a, idxs3), conv3b], 1) + unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1) donv3b = F.relu(self.donv3b(unpool3), inplace = True) donv3a = F.relu(self.donv3a(donv3b)) - unpool2 = torch.cat([self.unpool2.apply(donv3a, idxs2), conv2], 1) + unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1) donv2 = F.relu(self.donv2(unpool2), inplace = True) - unpool1 = torch.cat([self.unpool1.apply(donv2, idxs1), conv1], 1) + unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1) donv1 = F.relu(self.donv1(unpool1), inplace = True) output = self.output(donv1) return output diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml deleted file mode 100644 index 
7a26b0e5570..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml +++ /dev/null @@ -1,13 +0,0 @@ -# These are supported funding model platforms - -github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] -patreon: # Replace with a single Patreon username -open_collective: # Replace with a single Open Collective username -ko_fi: # Replace with a single Ko-fi username -tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel -community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry -liberapay: # Replace with a single Liberapay username -issuehunt: # Replace with a single IssueHunt username -otechie: # Replace with a single Otechie username -lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry -custom: https://www.buymeacoffee.com/dkurt diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml deleted file mode 100644 index a089fe88281..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml +++ /dev/null @@ -1,208 +0,0 @@ -# This is a basic workflow to help you get started with Actions - -name: CI - -# Controls when the action will run. Triggers the workflow on push or pull request -# events but only for the master branch -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -env: - OPENVINO_VERSION: 2022.1.0 - OPENCV_VERSION: 4.5.5 - VERSION: 2022.1.0.dev3 - DIST_WIN: https://registrationcenter-download.intel.com/akdlm/irc_nas/18618/w_openvino_toolkit_p_2022.1.0.643_offline.exe - DIST_MAC: https://registrationcenter-download.intel.com/akdlm/irc_nas/18616/m_openvino_toolkit_p_2022.1.0.643_offline.dmg - -# A workflow run is made up of one or more jobs that can run sequentially or in parallel -jobs: - build_lnx: - runs-on: ubuntu-18.04 - container: - centos:centos8.4.2105 - - steps: - - uses: actions/checkout@v2 - - - name: Install dependencies - run: | - dnf -y --disablerepo '*' --enablerepo=extras swap centos-linux-repos centos-stream-repos - dnf -y distro-sync - yum group install -y "Development Tools" --nobest - yum install -y python3 wget cmake - python3 -m pip install --upgrade pip - - - name: Install OpenVINO - run: | - tee > /tmp/openvino-2022.repo << EOF - [OpenVINO] - name=Intel(R) Distribution of OpenVINO 2022 - baseurl=https://yum.repos.intel.com/openvino/2022 - enabled=1 - gpgcheck=1 - repo_gpgcheck=1 - gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - EOF - mv /tmp/openvino-2022.repo /etc/yum.repos.d - yum repolist | grep -i openvino - yum install -y yum-utils openvino-2022.1.0 - - - name: Build OpenCV - run: | - git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 - mkdir opencv_build && cd opencv_build - cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core - make -j$(nproc --all) install - - - name: Build CPU extensions - run: | - source /opt/intel/openvino_2022/setupvars.sh - cd user_ie_extensions - mkdir build && cd build - cmake .. 
-DCMAKE_BUILD_TYPE=Release - make -j$(nproc --all) - - - name: Build wheel - run: | - python3 -m pip install wheel - EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.so python3 setup.py build bdist_wheel - mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-manylinux2014_x86_64.whl - - - uses: actions/upload-artifact@v2 - with: - name: "wheel_lnx" - path: "*.whl" - - build_win: - runs-on: windows-latest - - steps: - - uses: actions/checkout@v2 - - - name: Install OpenVINO - run: | - Invoke-WebRequest ${{env.DIST_WIN}} -OutFile openvino.exe - Start-Process -Wait -FilePath "openvino.exe" -ArgumentList "-s -a --silent --eula accept" - shell: pwsh - - - name: Build OpenCV - run: | - git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 - mkdir opencv_build && cd opencv_build - cmake ..\\opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core - cmake --build . --config Release -j 2 - cmake --install . --prefix "C:\opencv_install" - shell: cmd - - - name: Build CPU extensions - run: | - call "C:\Program Files (x86)\Intel\openvino_2022\setupvars.bat" - cd user_ie_extensions - mkdir build && cd build - cmake .. -DOpenCV_DIR="C:\opencv_install" - cmake --build . --config Release -j 2 - shell: cmd - - - name: Build wheel - run: | - python3 -m pip install --upgrade pip - python3 -m pip install wheel - ls user_ie_extensions\build\Release - set EXT_LIB=user_ie_extensions\\build\\Release\\user_cpu_extension.dll - python3 setup.py build bdist_wheel - move dist\\*.whl openvino_extensions-${{env.VERSION}}-py3-none-win_amd64.whl - shell: cmd - - - uses: actions/upload-artifact@v2 - with: - name: "wheel_win" - path: "*.whl" - - build_mac: - runs-on: macos-10.15 - - steps: - - uses: actions/checkout@v2 - - - name: Install OpenVINO - run: | - curl ${{env.DIST_MAC}} -o openvino.dmg - hdiutil attach openvino.dmg - cd /Volumes/m_openvino_toolkit_p_2022.1.0.643_offline/bootstrapper.app/Contents/MacOS/ - sudo ./install.sh -s --eula=accept - - - name: Build OpenCV - run: | - git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 - mkdir opencv_build && cd opencv_build - cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core - make -j$(nproc --all) install - - - name: Build CPU extensions - run: | - source /opt/intel/openvino_2022/setupvars.sh - cd user_ie_extensions - mkdir build && cd build - cmake .. 
-DCMAKE_BUILD_TYPE=Release - make -j$(nproc --all) - - - name: Build wheel - run: | - python3 -m pip install --upgrade pip - python3 -m pip install wheel - ls user_ie_extensions/build/ - EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.dylib python3 setup.py build bdist_wheel - mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-macosx_10_15_x86_64.whl - - - uses: actions/upload-artifact@v2 - with: - name: "wheel_mac" - path: "*.whl" - - test_lnx: - needs: build_lnx - runs-on: ubuntu-18.04 - - steps: - - uses: actions/checkout@v2 - - - uses: actions/download-artifact@v2 - with: - name: wheel_lnx - - - name: Install dependencies - run: | - sudo apt-get install -y python3-setuptools libopencv-dev - python3 -m pip install --upgrade pip - python3 -m pip install -r tests/requirements.txt - python3 -m pip install -U protobuf - python3 -m pip install openvino-dev[onnx]==${{env.OPENVINO_VERSION}} - - # Also, remove "openvino_extensions" folder to avoid import confusion - - name: Install CPU extensions - run: | - rm -r openvino_extensions - python3 -m pip install *.whl - - - name: Test - run: | - python3 -m pytest tests/run_tests.py - - publish: - if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} - needs: [test_lnx, build_win, build_mac] - runs-on: ubuntu-18.04 - steps: - - uses: actions/download-artifact@v2 - - - name: Publish - env: - TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} - TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} - run: | - python3 -m pip install --upgrade pip - python3 -m pip install twine - python3 -m twine upload wheel*/*.whl --skip-existing diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
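The models.py hunk earlier in this patch drops the custom ONNX-export unpool wrapper in favour of torch's built-in `nn.MaxUnpool2d`. A minimal sketch of the pooling/unpooling pairing that hunk relies on (shapes here are hypothetical, not taken from the network):

```python
# Sketch only: nn.MaxUnpool2d consumes the indices produced by
# nn.MaxPool2d(return_indices=True) and restores the pre-pool spatial size,
# placing values back at the argmax positions and zeros elsewhere.
import torch
from torch import nn

pool = nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(2, stride=2)

x = torch.randn(1, 64, 32, 32)       # hypothetical feature map
pooled, indices = pool(x)            # -> (1, 64, 16, 16) plus argmax indices
restored = unpool(pooled, indices)   # -> (1, 64, 32, 32)
assert restored.shape == x.shape
```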
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md deleted file mode 100644 index b7cc66c5f79..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md +++ /dev/null @@ -1,41 +0,0 @@ -Repository with guides to enable some layers from PyTorch in Intel OpenVINO: - -[![CI](https://github.com/dkurt/openvino_pytorch_layers/workflows/CI/badge.svg?branch=master)](https://github.com/dkurt/openvino_pytorch_layers/actions?query=branch%3Amaster) - -* [nn.MaxUnpool2d](examples/unpool) -* [torch.fft](examples/fft) -* [nn.functional.grid_sample](https://github.com/dkurt/openvino_pytorch_layers/tree/master/examples/grid_sample) -* [torchvision.ops.DeformConv2d](examples/deformable_conv) -* [SparseConv](examples/sparse_conv) from [Open3D](https://github.com/isl-org/Open3D) - - -## OpenVINO Model Optimizer extension - -To create OpenVINO IR, use extra `--extension` flag to specify a path to Model Optimizer extensions that perform graph transformations and register custom layers. - -```bash -mo --input_model model.onnx --extension openvino_pytorch_layers/mo_extensions -``` - -## Custom CPU extensions - -You also need to build CPU extensions library which actually has C++ layers implementations: -```bash -source /opt/intel/openvino_2022/setupvars.sh - -cd user_ie_extensions -mkdir build && cd build -cmake .. -DCMAKE_BUILD_TYPE=Release && make -j$(nproc --all) -``` - -Add compiled extensions library to your project: - -```python -from openvino.runtime import Core - -core = Core() -core.add_extension('user_ie_extensions/build/libuser_cpu_extension.so') - -model = ie.read_model('model.xml') -compiled_model = ie.compile_model(model, 'CPU') -``` diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py deleted file mode 100644 index 13fe7c81232..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py +++ /dev/null @@ -1,39 +0,0 @@ -# NOTE: import order is critical for now: extensions, openvino and only then numpy -from openvino_extensions import get_extensions_path -from openvino.inference_engine import IECore - -import argparse -import numpy as np - -parser = argparse.ArgumentParser(description='Compare OpenVINO implementation with reference data') -parser.add_argument('--num_inputs', type=int, default=1) -parser.add_argument('-m', '--model', default="model.xml") -parser.add_argument('-d', '--device', default="CPU") -args = parser.parse_args() - -inputs = {} -shapes = {} -for i in range(args.num_inputs): - suffix = '{}'.format(i if i > 0 else '') - data = np.load('inp' + suffix + '.npy') - inputs['input' + suffix] = data - shapes['input' + suffix] = data.shape - -ref = np.load('ref.npy') - -ie = IECore() -ie.add_extension(get_extensions_path(), 'CPU') -ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU') - -net = ie.read_network(args.model) -net.reshape(shapes) -exec_net = ie.load_network(net, args.device) - -out = exec_net.infer(inputs) -out = next(iter(out.values())) - -maxdiff 
= np.max(np.abs(ref - out)) -print('Reference range: [{}, {}]'.format(np.min(ref), np.max(ref))) -print('Maximal difference:', maxdiff) -if maxdiff > 1e-5: - exit(1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py deleted file mode 100644 index a6c55adcf97..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -class CalculateGrid(torch.autograd.Function): - @staticmethod - def symbolic(g, in_positions): - return g.op("CalculateGrid", in_positions) - - @staticmethod - def forward(self, in_positions): - filter = torch.Tensor([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0], - [0, -1, -1], [0, -1, 0], [0, 0, -1], - [0, 0, 0]]).to(in_positions.device) - - out_pos = in_positions.long().repeat(1, filter.shape[0]).reshape(-1, 3) - filter = filter.repeat(in_positions.shape[0], 1) - - out_pos = out_pos + filter - out_pos = out_pos[out_pos.min(1).values >= 0] - out_pos = out_pos[(~((out_pos.long() % 2).bool()).any(1))] - out_pos = torch.unique(out_pos, dim=0) - - return out_pos + 0.5 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py deleted file mode 100644 index e7a3fd9d40e..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .calculate_grid import CalculateGrid - - -class MyModel(nn.Module): - def __init__(self): - super(MyModel, self).__init__() - self.calculate_grid = CalculateGrid() - - def forward(self, x): - return self.calculate_grid.apply(x) - - -def export(num_points, max_grid_extent): - # Generate a list of unique positions and add a mantissa - np.random.seed(32) - torch.manual_seed(11) - - inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) - inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) - - model = MyModel() - with torch.no_grad(): - torch.onnx.export(model, (inp_pos), 'model.onnx', - input_names=['input'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) - - ref = model(inp_pos).detach().numpy() - - # Pad values with espetial end line (-1, 0, 0) and zeros - ref = np.concatenate((ref, [[-1, 0, 0]])) - ref = np.pad(ref, ((0, inp_pos.shape[0] - ref.shape[0]), (0, 0))) - - np.save('inp', inp_pos.detach().numpy()) - np.save('ref', ref) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py deleted file mode 100644 index d0a854b6c50..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -import torch.nn as nn - -class ComplexMul(torch.autograd.Function): - @staticmethod - 
def symbolic(g, input_tensor, other_tensor, is_conj = True): - return g.op("ComplexMultiplication", input_tensor, other_tensor, is_conj_i=int(is_conj)) - - @staticmethod - def forward(self, input_tensor, other_tensor): - complex_index = -1 - real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1] - imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0] - - multiplication = torch.cat( - [ - real_part.unsqueeze(dim=complex_index), - imaginary_part.unsqueeze(dim=complex_index), - ], - dim=complex_index, - ) - return multiplication diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py deleted file mode 100644 index 564d93342e6..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py +++ /dev/null @@ -1,43 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .complex_mul import ComplexMul - -class MyModel(nn.Module): - def __init__(self): - super(MyModel, self).__init__() - self.complex_mul = ComplexMul() - - def forward(self, x, y): - return self.complex_mul.apply(x, y) - -def export(inp_shape=[3, 2, 4, 8, 2], other_shape=[3, 2, 4, 8, 2]): - np.random.seed(324) - torch.manual_seed(32) - - model = MyModel() - inp = Variable(torch.randn(inp_shape)) - inp1 = Variable(torch.randn(other_shape)) - model.eval() - - with torch.no_grad(): - torch.onnx.export(model, (inp, inp1), 'model.onnx', - input_names=['input', 'input1'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) - - ref = model(inp, inp1) - np.save('inp', inp.detach().numpy()) - np.save('inp1', inp1.detach().numpy()) - np.save('ref', ref.detach().numpy()) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Generate ONNX model and test data') - parser.add_argument('--inp_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) - parser.add_argument('--other_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) - args = parser.parse_args() - - export(args.inp_shape, args.other_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py deleted file mode 100644 index fce9fa679ea..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py +++ /dev/null @@ -1,51 +0,0 @@ -import torch -import torch.nn as nn -import torchvision.ops as ops - - -class DeformableConvFunc(torch.autograd.Function): - @staticmethod - def symbolic(g, cls, x, offset): - weight = cls.state_dict()["weight"] - weight = g.op("Constant", value_t=weight) - - return g.op( - "DeformableConv2D", - x, - offset, - weight, - strides_i=(cls.stride, cls.stride), - pads_i=(cls.padding, cls.padding, cls.padding, cls.padding), - dilations_i=(cls.dilation, cls.dilation), - deformable_group_i=cls.groups, - ) - - @staticmethod - def forward(self, cls, x, offset): - y = cls.origin_forward(x, offset) - return y - - -class DeformableConvolution(ops.DeformConv2d): - """ - This is a support class which helps export 
network with SparseConv in ONNX format. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.origin_forward = super().forward - self.stride = kwargs.get("stride", 1) - self.padding = kwargs.get("padding", 0) - self.dilation = kwargs.get("dilation", 1) - self.groups = kwargs.get("groups", 1) - self.pad_l = nn.ConstantPad2d((1, 1, 1, 1), 0) - - def forward(self, x, offset): - """ - Using paddings is a workaround for 2021.4 release. - """ - x = self.pad_l(x) - offset = self.pad_l(offset) - y = DeformableConvFunc.apply(self, x, offset) - y = y[:, :, 1:-1, 1:-1] - return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py deleted file mode 100644 index a7630adedc0..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py +++ /dev/null @@ -1,111 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .deformable_conv import DeformableConvolution - -np.random.seed(324) -torch.manual_seed(32) - - -class MyModel(nn.Module): - def __init__( - self, - inplanes, - outplanes, - kernel_size=3, - stride=1, - padding=1, - dilation=1, - bias=False, - deformable_groups=1, - ): - super(MyModel, self).__init__() - self.def_conv = DeformableConvolution( - inplanes, - outplanes, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - bias=bias, - groups=deformable_groups, - ) - - def forward(self, x, offset): - y = self.def_conv(x, offset) - return y - - -def export( - inplanes, - outplanes, - kernel_size, - stride, - padding, - dilation, - deformable_groups, - inp_shape, - offset_shape, -): - np.random.seed(324) - torch.manual_seed(32) - - model = MyModel( - inplanes, - outplanes, - kernel_size=kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - deformable_groups=deformable_groups, - ) - model.eval() - - x = Variable(torch.randn(inp_shape)) - offset = Variable(torch.randn(offset_shape)) - ref = model(x, offset) - - np.save("inp", x.detach().numpy()) - np.save("inp1", offset.detach().numpy()) - np.save("ref", ref.detach().numpy()) - - with torch.no_grad(): - torch.onnx.export( - model, - (x, offset), - "model.onnx", - input_names=["input", "input1"], - output_names=["output"], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH, - opset_version=12, - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Generate ONNX model and test data") - parser.add_argument("--inp_shape", type=int, nargs="+", default=[1, 15, 128, 240]) - parser.add_argument( - "--offset_shape", type=int, nargs="+", default=[1, 18, 128, 240] - ) - parser.add_argument("--inplanes", type=int, nargs="+", default=15) - parser.add_argument("--outplanes", type=int, nargs="+", default=15) - parser.add_argument("--kernel_size", type=int, nargs="+", default=3) - parser.add_argument("--stride", type=int, nargs="+", default=1) - parser.add_argument("--padding", type=int, nargs="+", default=1) - parser.add_argument("--dilation", type=int, nargs="+", default=1) - parser.add_argument("--deformable_groups", type=int, nargs="+", default=1) - args = parser.parse_args() - - export( - args.inplanes, - args.outplanes, - args.kernel_size, - args.stride, - args.padding, - args.dilation, - 
args.deformable_groups, - args.inp_shape, - args.offset_shape, - ) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py deleted file mode 100644 index 252c6c61207..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .fft import FFT - - -class MyModel(nn.Module): - def __init__(self, inverse, centred, dims): - super(MyModel, self).__init__() - self.inverse = inverse - self.centred = centred - self.dims = dims - self.fft = FFT() - - def forward(self, x): - return self.fft.apply(x, self.inverse, self.centred, self.dims) - - -def export(shape, inverse, centered, dims): - np.random.seed(324) - torch.manual_seed(32) - - model = MyModel(inverse, centered, dims) - inp = Variable(torch.randn(shape)) - model.eval() - - with torch.no_grad(): - torch.onnx.export(model, inp, 'model.onnx', - input_names=['input'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH) - - ref = model(inp) - np.save('inp', inp.detach().numpy()) - np.save('ref', ref.detach().numpy()) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Generate ONNX model and test data') - parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8, 2]) - args = parser.parse_args() - - export(args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py deleted file mode 100644 index ccc6c872bd5..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py +++ /dev/null @@ -1,73 +0,0 @@ -import torch -from packaging import version -from typing import List, Tuple, Union - -def roll( - data: torch.Tensor, - shift: Union[int, Union[Tuple[int, ...], List[int]]], - dims: Union[int, Union[Tuple, List]], -) -> torch.Tensor: - """ - Similar to numpy roll but applies to pytorch tensors. - Parameters - ---------- - data : torch.Tensor - shift: tuple, int - dims : tuple, list or int - - Returns - ------- - torch.Tensor - """ - if isinstance(shift, (tuple, list)) and isinstance(dims, (tuple, list)): - if len(shift) != len(dims): - raise ValueError(f"Length of shifts and dimensions should be equal. 
Got {len(shift)} and {len(dims)}.") - for curr_shift, curr_dim in zip(shift, dims): - data = roll(data, curr_shift, curr_dim) - return data - dim_index = dims - shift = shift % data.size(dims) - - if shift == 0: - return data - left_part = data.narrow(dim_index, 0, data.size(dims) - shift) - right_part = data.narrow(dim_index, data.size(dims) - shift, shift) - return torch.cat([right_part, left_part], dim=dim_index) - -def fftshift(data: torch.Tensor, dims) -> torch.Tensor: - shift = [data.size(curr_dim) // 2 for curr_dim in dims] - return roll(data, shift, dims) - -def ifftshift(data: torch.Tensor, dims) -> torch.Tensor: - shift = [(data.size(curr_dim) + 1) // 2 for curr_dim in dims] - return roll(data, shift, dims) - -class FFT(torch.autograd.Function): - @staticmethod - def symbolic(g, x, inverse, centered, dims): - dims = torch.tensor(dims) - dims = g.op("Constant", value_t=dims) - - return g.op('FFT', x, dims, inverse_i=inverse, centered_i=centered) - - @staticmethod - def forward(self, x, inverse, centered, dims): - # https://pytorch.org/docs/stable/torch.html#torch.fft - if centered: - x = ifftshift(x, dims) - - if version.parse(torch.__version__) >= version.parse("1.8.0"): - func = torch.fft.ifftn if inverse else torch.fft.fftn - x = torch.view_as_complex(x) - y = func(x, dim=dims, norm="ortho") - y = torch.view_as_real(y) - else: - signal_ndim = max(dims) - assert dims == list(range(1, signal_ndim + 1)) - func = torch.ifft if inverse else torch.fft - y = func(input=x, signal_ndim=signal_ndim, normalized=True) - - if centered: - y = fftshift(y, dims) - - return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py deleted file mode 100644 index fe3098d72f7..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py +++ /dev/null @@ -1,50 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .grid_sample import GridSample - - -class MyModel(nn.Module): - def __init__(self): - super(MyModel, self).__init__() - self.grid_sample = GridSample() - - def forward(self, x, grid): - return self.grid_sample.apply(x, grid) - - -def export(inp_shape=[5, 3, 6, 9], grid_shape=[5, 6, 9, 2]): - np.random.seed(324) - torch.manual_seed(32) - - if inp_shape[2] != grid_shape[1]: - raise Exception('Input height (got {}) should be equal to grid height (got {})'.format(inp_shape[2], grid_shape[1])) - if inp_shape[3] != grid_shape[2]: - raise Exception('Input width (got {}) should be equal to grid width (got {})'.format(inp_shape[3], grid_shape[2])) - - model = MyModel() - inp = Variable(torch.randn(inp_shape)) - grid = torch.Tensor(np.random.uniform(low=-2, high=2, size=grid_shape)) - model.eval() - - with torch.no_grad(): - torch.onnx.export(model, (inp, grid), 'model.onnx', - input_names=['input', 'input1'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) - - ref = model(inp, grid) - np.save('inp', inp.detach().numpy()) - np.save('inp1', grid.detach().numpy()) - np.save('ref', ref.detach().numpy()) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Generate ONNX model and test data') - parser.add_argument('--inp_shape', type=int, nargs='+', default=[5, 3, 6, 9]) - 
parser.add_argument('--grid_shape', type=int, nargs='+', default=[5, 6, 9, 2]) - args = parser.parse_args() - - export(args.inp_shape, args.grid_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py deleted file mode 100644 index f69fb7177ae..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py +++ /dev/null @@ -1,12 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -class GridSample(torch.autograd.Function): - @staticmethod - def symbolic(g, x, grid): - return g.op('GridSample', x, grid) - - @staticmethod - def forward(self, x, grid): - return F.grid_sample(x, grid, 'bilinear', 'zeros', True) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py deleted file mode 100644 index 9f2467b720a..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py +++ /dev/null @@ -1,69 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .sparse_conv import SparseConvONNX, SparseConvTransposeONNX - - -def export(num_inp_points, num_out_points, max_grid_extent, in_channels, - filters, kernel_size, normalize, transpose): - np.random.seed(324) - torch.manual_seed(32) - - if transpose: - sparse_conv = SparseConvTransposeONNX(in_channels=in_channels, - filters=filters, - kernel_size=kernel_size, - use_bias=False, - normalize=False) - else: - sparse_conv = SparseConvONNX(in_channels=in_channels, - filters=filters, - kernel_size=kernel_size, - use_bias=False, - normalize=False) - - # Generate a list of unique positions and add a mantissa - def gen_pos(num_points): - inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) - inp_pos = np.unique(inp_pos, axis=0).astype(np.float32) - inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) - return inp_pos - - inp_pos = gen_pos(num_inp_points) - out_pos = gen_pos(num_out_points) if num_out_points else inp_pos - - features = torch.randn([inp_pos.shape[0], in_channels]) - - voxel_size = torch.tensor(1.0) - sparse_conv.eval() - - new_kernel = torch.randn(sparse_conv.state_dict()["kernel"].shape) - sparse_conv.load_state_dict({"kernel": new_kernel, - "offset": sparse_conv.state_dict()["offset"]}) - - with torch.no_grad(): - torch.onnx.export(sparse_conv, (features, inp_pos, out_pos, voxel_size), 'model.onnx', - input_names=['input', 'input1', 'input2', 'voxel_size'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) - - ref = sparse_conv(features, inp_pos, out_pos, voxel_size) - np.save('inp', features.detach().numpy()) - np.save('inp1', inp_pos.detach().numpy()) - np.save('inp2', out_pos.detach().numpy()) - np.save('ref', ref.detach().numpy()) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Generate ONNX model and test data') - parser.add_argument('--num_points', type=int) - parser.add_argument('--max_grid_extent', type=int) - parser.add_argument('--in_channels', type=int) - parser.add_argument('--filters', type=int) - 
parser.add_argument('--kernel_size', type=int) - args = parser.parse_args() - - export(args.num_points, args.max_grid_extent, - args.in_channels, args.filters, args.kernel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py deleted file mode 100644 index 54f4dbb309f..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py +++ /dev/null @@ -1,55 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from open3d.ml.torch.layers import SparseConv, SparseConvTranspose - -class SparseConvFunc(torch.autograd.Function): - @staticmethod - def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): - kernel = cls.state_dict()["kernel"] - offset = cls.state_dict()["offset"] - kernel = g.op("Constant", value_t=kernel) - offset = g.op("Constant", value_t=offset) - return g.op("SparseConv", feat, in_pos, out_pos, kernel, offset) - - @staticmethod - def forward(self, cls, feat, in_pos, out_pos, voxel_size): - return cls.origin_forward(feat, in_pos, out_pos, voxel_size) - - -class SparseConvONNX(SparseConv): - """ - This is a support class which helps export network with SparseConv in ONNX format. - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.origin_forward = super().forward - - def forward(self, feat, in_pos, out_pos, voxel_size): - return SparseConvFunc.apply(self, feat, in_pos, out_pos, voxel_size) - - -class SparseConvTransposeFunc(torch.autograd.Function): - @staticmethod - def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): - kernel = cls.state_dict()["kernel"] - offset = cls.state_dict()["offset"] - kernel = g.op("Constant", value_t=kernel) - offset = g.op("Constant", value_t=offset) - return g.op("SparseConvTranspose", feat, in_pos, out_pos, kernel, offset) - - @staticmethod - def forward(self, cls, feat, in_pos, out_pos, voxel_size): - return cls.origin_forward(feat, in_pos, out_pos, voxel_size) - - -class SparseConvTransposeONNX(SparseConvTranspose): - """ - This is a support class which helps export network with SparseConvTranspose in ONNX format. - """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.origin_forward = super().forward - - def forward(self, feat, in_pos, out_pos, voxel_size): - return SparseConvTransposeFunc.apply(self, feat, in_pos, out_pos, voxel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md deleted file mode 100644 index c46e767975d..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md +++ /dev/null @@ -1,47 +0,0 @@ -Guide of how to enable PyTorch `nn.MaxUnpool2d` in Intel OpenVINO. 
- - -## Description -There are two problems with OpenVINO and MaxUnpool at the moment of this guide creation: - -* OpenVINO does not have Unpooling kernels -* PyTorch -> ONNX conversion is unimplemented for `nn.MaxUnpool2d` - -So following this guide you will learn -* How to perform PyTorch -> ONNX conversion for unsupported layers -* How to convert ONNX to OpenVINO Intermediate Respresentation (IR) with extensions -* How to write custom CPU layers in OpenVINO - -## Get ONNX model - -MaxUnpool layer in PyTorch takes two inputs - input `features` from any layer and `indices` after MaxPool layer: - -```python -self.pool = nn.MaxPool2d(2, stride=2, return_indices=True) -self.unpool = nn.MaxUnpool2d(2, stride=2) - -output, indices = self.pool(x) -# ... -unpooled = self.unpool(features, indices) -``` - -If your version of PyTorch does not support ONNX model conversion with MaxUnpool, replace every unpool layer definition -```python -self.unpool = nn.MaxUnpool2d(2, stride=2) -``` -to -```python -self.unpool = Unpool2d() -``` - -where `Unpool2d` defined in [unpool.py](./unpool.py). Also, replace op usage from - -```python -self.unpool(features, indices) -``` -to -```python -self.unpool.apply(features, indices) -``` - -See complete example in [export_model.py](./export_model.py). diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py deleted file mode 100644 index e229e47adc3..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py +++ /dev/null @@ -1,56 +0,0 @@ -import numpy as np -import argparse -import torch -import torch.nn as nn -from torch.autograd import Variable -from .unpool import Unpool2d - -np.random.seed(324) -torch.manual_seed(32) - -class MyModel(nn.Module): - def __init__(self, mode): - super(MyModel, self).__init__() - self.mode = mode - self.pool = nn.MaxPool2d(2, stride=2, return_indices=True) - self.conv1 = nn.Conv2d(3, 4, kernel_size=1, stride=1) - self.conv2 = nn.Conv2d(4, 4, kernel_size=1, stride=1) - self.unpool = Unpool2d() - - def forward(self, x): - y = self.conv1(x) - output, indices = self.pool(y) - conv = self.conv2(output) - if self.mode == 'default': - return self.unpool.apply(conv, indices) - elif self.mode == 'dynamic_size': - return self.unpool.apply(conv, indices, x) - else: - raise Exception('Unknown mode: ' + self.mode) - - -def export(mode, shape=[5, 3, 6, 8]): - np.random.seed(324) - torch.manual_seed(32) - - model = MyModel(mode) - inp = Variable(torch.randn(shape)) - model.eval() - - with torch.no_grad(): - torch.onnx.export(model, inp, 'model.onnx', - input_names=['input'], - output_names=['output'], - operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) - - ref = model(inp) - np.save('inp', inp.detach().numpy()) - np.save('ref', ref.detach().numpy()) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Generate ONNX model and test data') - parser.add_argument('--mode', choices=['default', 'dynamic_size'], help='Specify Unpooling behavior') - parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8]) - args = parser.parse_args() - export(args.mode, args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py deleted file mode 100644 index 52f961fe2cc..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py +++ /dev/null @@ -1,17 +0,0 @@ -import torch -import torch.nn as nn - -class Unpool2d(torch.autograd.Function): - @staticmethod - def symbolic(g, x, indices, output_size=None): - if output_size: - return g.op('Unpooling', x, indices, output_size) - else: - return g.op('Unpooling', x, indices) - - @staticmethod - def forward(self, x, indices, output_size=None): - if not output_size is None: - return nn.MaxUnpool2d(2, stride=2)(x, indices, output_size=output_size.size()) - else: - return nn.MaxUnpool2d(2, stride=2)(x, indices) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py deleted file mode 100644 index 8e52a177bfd..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py +++ /dev/null @@ -1,46 +0,0 @@ -# mo_extensions/front/onnx/max_unpool.py -import numpy as np - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from mo_extensions.ops.MaxPoolGrad import MaxPoolGrad -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - -class MaxUnpool(FrontReplacementSubgraph): - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('max_pool0', dict(op='MaxPool')), - ('max_pool1', dict(op='MaxPool')), - ('slice', dict(op='AttributedSlice')), - ('sub', dict(op='Sub')), - ('unpool', dict(op='Unpooling')), - ], - edges=[ - ('max_pool1', 'slice'), - ('max_pool0', 'sub', {'in': 0}), - ('slice', 'sub', {'in': 1}), - ('sub', 'unpool', {'in': 1}), - ]) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict): - max_pool = match['max_pool0'] - max_pool_input = max_pool.in_port(0).get_source().node - unpool = match['unpool'] - unpool_input = unpool.in_port(0).get_source().node - - max_pool.out_port(1).disconnect() - - # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] - inputs = [max_pool_input, max_pool, unpool_input] - - res = MaxPoolGrad(graph, dict(name=unpool.name + '/fused')).create_node(inputs) - unpool.out_port(0).get_connection().set_source(res.out_port(0)) - - if len(unpool.in_ports()) == 3: - unpool.in_port(2).get_source().connect(res.in_port(3)) - else: - max_pool_input.out_port(0).connect(res.in_port(3)) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py deleted file mode 100644 index 4011e697695..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py +++ /dev/null @@ -1,24 +0,0 @@ -# mo_extensions/ops/MaxPoolGrad.py -import numpy as np -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - -def shape_infer(node): - # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] - assert(len(node.in_nodes()) == 4) - node.out_node(0).shape = node.in_node(0).shape - node.out_node(0).shape[2] = node.in_node(3).shape[2] - 
node.out_node(0).shape[3] = node.in_node(3).shape[3] - -class MaxPoolGrad(Op): - op = 'MaxPoolGrad' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': __class__.op, - 'op': __class__.op, - 'in_ports_count': 4, - 'out_ports_count': 1, - 'infer': shape_infer - }, attrs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py deleted file mode 100644 index ccef5c08085..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -import os -import sys - -def get_extensions_path(): - lib_name = 'user_cpu_extension' - if sys.platform == 'win32': - lib_name += '.dll' - elif sys.platform == 'linux': - lib_name = 'lib' + lib_name + '.so' - else: - lib_name = 'lib' + lib_name + '.dylib' - return os.path.join(os.path.dirname(__file__), lib_name) - - -# This is a dummy procedure which instantiates onnx_importer library preloading -try: - import io - from openvino.inference_engine import IECore - ie = IECore() - buf = io.BytesIO() - ie.read_network(buf.getvalue(), b"", init_from_buffer=True) -except Exception: - pass diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py deleted file mode 100644 index aa3a9d6fb11..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -import os -from setuptools import setup - -if not 'VERSION' in os.environ: - raise Exception('Specify package version by environment variable') - -if not 'EXT_LIB' in os.environ: - raise Exception('Specify environment variable with a path to extensions library') - -setup(name='openvino-extensions', - version=os.environ['VERSION'], - author='Dmitry Kurtaev', - url='https://github.com/dkurt/openvino_pytorch_layers', - packages=['openvino_extensions'], - data_files=[('../../openvino_extensions', [os.environ['EXT_LIB']])], - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - ], -) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt deleted file mode 100644 index 0ccfb5af363..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -torch==1.8.1 -torchvision==0.9.1 -open3d==0.14.1 -tensorboard -pytest diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py deleted file mode 100644 index 4cda23cbf2e..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py +++ /dev/null @@ -1,147 +0,0 @@ -# NOTE: import order is critical for now: extensions, openvino and only then numpy -from openvino_extensions import get_extensions_path -from openvino.runtime import Core - -import subprocess -import pytest -from pathlib import Path - -import numpy as np - -def convert_model(): - 
subprocess.run(['mo', - '--input_model=model.onnx', - # '--extension', "user_ie_extensions/build/libuser_cpu_extension.so"], - '--extension', get_extensions_path()], - check=True) - -def run_test(convert_ir=True, test_onnx=False, num_inputs=1, threshold=1e-5): - if convert_ir and not test_onnx: - convert_model() - - inputs = {} - shapes = {} - for i in range(num_inputs): - suffix = '{}'.format(i if i > 0 else '') - data = np.load('inp' + suffix + '.npy') - inputs['input' + suffix] = data - shapes['input' + suffix] = data.shape - - ref = np.load('ref.npy') - - ie = Core() - # ie.add_extension("user_ie_extensions/build/libuser_cpu_extension.so") - ie.add_extension(get_extensions_path()) - # ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU') - - net = ie.read_model('model.onnx' if test_onnx else 'model.xml') - net.reshape(shapes) - exec_net = ie.compile_model(net, 'CPU') - - out = exec_net.infer_new_request(inputs) - out = next(iter(out.values())) - - assert ref.shape == out.shape - diff = np.max(np.abs(ref - out)) - assert diff <= threshold - - -# def test_unpool(): -# from examples.unpool.export_model import export -# export(mode='default') -# run_test() - - -# def test_unpool_reshape(): -# from examples.unpool.export_model import export -# export(mode='dynamic_size', shape=[5, 3, 6, 9]) -# run_test() - -# export(mode='dynamic_size', shape=[4, 3, 17, 8]) -# run_test(convert_ir=False) - -@pytest.mark.parametrize("shape", [[5, 120, 2], [4, 240, 320, 2], [3, 16, 240, 320, 2], [4, 5, 16, 31, 2]]) -@pytest.mark.parametrize("inverse", [False, True]) -@pytest.mark.parametrize("centered", [False, True]) -@pytest.mark.parametrize("test_onnx", [False, True]) -@pytest.mark.parametrize("dims", [[1], [1, 2], [2, 3]]) -def test_fft(shape, inverse, centered, test_onnx, dims): - from examples.fft.export_model import export - - if len(shape) == 3 and dims != [1] or \ - len(shape) == 4 and dims == [2, 3] or \ - len(shape) == 5 and dims == [1] or \ - centered and len(dims) != 2: - pytest.skip("unsupported configuration") - - export(shape, inverse, centered, dims) - run_test(test_onnx=test_onnx) - - -@pytest.mark.parametrize("test_onnx", [False, True]) -def test_grid_sample(test_onnx): - from examples.grid_sample.export_model import export - - export() - run_test(num_inputs=2, test_onnx=test_onnx) - - -@pytest.mark.parametrize("shape", [[3, 2, 4, 8, 2], [3, 1, 4, 8, 2]]) -@pytest.mark.parametrize("test_onnx", [False, True]) -def test_complex_mul(shape, test_onnx): - from examples.complex_mul.export_model import export - - export(other_shape=shape) - run_test(num_inputs=2, test_onnx=test_onnx) - - -@pytest.mark.parametrize("in_channels", [1, 3]) -@pytest.mark.parametrize("filters", [1, 4]) -@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5], [2, 2, 2]]) -@pytest.mark.parametrize("normalize", [False, True]) -@pytest.mark.parametrize("out_pos", [None, 16]) -def test_sparse_conv(in_channels, filters, kernel_size, normalize, out_pos): - from examples.sparse_conv.export_model import export - - export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, - filters=filters, kernel_size=kernel_size, normalize=normalize, - transpose=False) - run_test(num_inputs=3, test_onnx=True, threshold=1e-4) - - -@pytest.mark.parametrize("in_channels", [1, 3]) -@pytest.mark.parametrize("filters", [1, 4]) -@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5]]) -@pytest.mark.parametrize("normalize", [False]) -@pytest.mark.parametrize("out_pos", [None, 
16]) -def test_sparse_conv_transpose(in_channels, filters, kernel_size, normalize, out_pos): - from examples.sparse_conv.export_model import export - - export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, - filters=filters, kernel_size=kernel_size, normalize=normalize, - transpose=True) - run_test(num_inputs=3, test_onnx=True, threshold=1e-4) - - -def test_calculate_grid(): - from examples.calculate_grid.export_model import export - export(num_points=10, max_grid_extent=5) - run_test(test_onnx=True) - - -def test_deformable_conv(): - from examples.deformable_conv.export_model import export - - export( - inplanes=15, - outplanes=15, - kernel_size=3, - stride=1, - padding=1, - dilation=1, - deformable_groups=1, - inp_shape=[1, 15, 128, 240], - offset_shape=[1, 18, 128, 240], - ) - run_test(num_inputs=2, threshold=2e-5) - run_test(num_inputs=2, test_onnx=True, threshold=2e-5) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt deleted file mode 100644 index f5b0576a26d..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2020 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -# [cmake:extension] -set(CMAKE_CXX_STANDARD 11) - -set(TARGET_NAME "user_cpu_extension") - -find_package(OpenVINO REQUIRED) -find_package(OpenCV REQUIRED COMPONENTS core) -# find_package(TBB REQUIRED tbb tbbmalloc) - -file(GLOB_RECURSE SRC *.cpp) - -add_library(${TARGET_NAME} SHARED ${SRC}) - -target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_OPENVINO_EXTENSION_API) - -if (OpenCV_FOUND) - target_include_directories(${TARGET_NAME} PRIVATE ${OpenCV_INCLUDE_DIRS}) -endif() - -target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime - # ${TBB_IMPORTED_TARGETS} - ) -# [cmake:extension] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp deleted file mode 100644 index 5545c737013..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "calculate_grid.hpp" - -using namespace TemplateExtension; - -//! [op:ctor] -CalculateGrid::CalculateGrid(const ov::Output& inp_pos) : Op({inp_pos}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void CalculateGrid::validate_and_infer_types() { - auto outShape = get_input_partial_shape(0); - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr CalculateGrid::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 1, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0)); -} -//! [op:copy] - -//! [op:visit_attributes] -bool CalculateGrid::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! 
[op:evaluate] -bool CalculateGrid::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* inpPos = reinterpret_cast<const float*>(inputs[0].data()); - float* out = reinterpret_cast<float*>(outputs[0].data()); - - std::set<std::tuple<int, int, int>> outPos; - - const size_t numPoints = inputs[0].get_shape()[0]; - static const std::vector<std::vector<int>> filters {{-1, -1, -1}, {-1, -1, 0}, {-1, 0, -1}, - {-1, 0, 0}, {0, -1, -1}, {0, -1, 0}, - {0, 0, -1}, {0, 0, 0}}; - - std::vector<int> pos(3); - for (size_t i = 0; i < numPoints; ++i) { - for (size_t j = 0; j < filters.size(); ++j) { - bool isValid = true; - for (size_t k = 0; k < 3; ++k) { - int val = static_cast<int>(inpPos[i * 3 + k]) + filters[j][k]; - if (val < 0 || val % 2) { - isValid = false; - break; - } - pos[k] = val; - } - if (isValid) - outPos.insert(std::make_tuple(pos[0], pos[1], pos[2])); - } - } - - int i = 0; - for (const auto it : outPos) { - out[i * 3] = 0.5f + std::get<0>(it); - out[i * 3 + 1] = 0.5f + std::get<1>(it); - out[i * 3 + 2] = 0.5f + std::get<2>(it); - i += 1; - } - memset(out + i * 3, 0, sizeof(float) * 3 * (numPoints - i)); - out[i * 3] = -1.0f; - return true; -} - -bool CalculateGrid::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp deleted file mode 100644 index b436f2d39d0..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include <openvino/op/op.hpp> -//! [op:common_include] - -//! [op:header] -namespace TemplateExtension { - -class CalculateGrid : public ov::op::Op { -public: - OPENVINO_OP("CalculateGrid"); - - CalculateGrid() = default; - CalculateGrid(const ov::Output<ov::Node>& inp_pos); - void validate_and_infer_types() override; - std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//! [op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp deleted file mode 100644 index 86f4af2e464..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "complex_mul.hpp" -// #include -#include - -using namespace TemplateExtension; - -//! [op:ctor] -ComplexMultiplication::ComplexMultiplication( - const ov::Output<ov::Node>& inp0, - const ov::Output<ov::Node>& inp1) : Op({inp0, inp1}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void ComplexMultiplication::validate_and_infer_types() { - auto outShape = get_input_partial_shape(0); - set_output_type(0, get_input_element_type(1), outShape); -} -//! [op:validate] - -//!
[op:copy] -std::shared_ptr<ov::Node> ComplexMultiplication::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); - return std::make_shared<ComplexMultiplication>(new_args.at(0), new_args.at(1)); -} -//! [op:copy] - -//! [op:visit_attributes] -bool ComplexMultiplication::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! [op:evaluate] -bool ComplexMultiplication::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* inp0 = reinterpret_cast<const float*>(inputs[0].data()); - const float* inp1 = reinterpret_cast<const float*>(inputs[1].data()); - float* out = reinterpret_cast<float*>(outputs[0].data()); - - size_t channels0 = inputs[0].get_shape()[1]; - size_t channels1 = inputs[1].get_shape()[1]; - size_t batch = inputs[0].get_shape()[0]; - size_t spatialSize = inputs[0].get_shape()[2] * inputs[0].get_shape()[3]; - - // x1 = x_r * y_r - x_i * y_i - // x2 = x_r * y_i + x_i * y_r - if (channels0 == channels1) - // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { - for (size_t ch = 0; ch < channels0 * batch; ++ch) { - for (int i = 0; i < spatialSize; ++i) { - int outIdx = (ch * spatialSize + i) * 2; - float real0 = inp0[outIdx]; - float imag0 = inp0[outIdx + 1]; - float real1 = inp1[outIdx]; - float imag1 = inp1[outIdx + 1]; - out[outIdx] = real0 * real1 - imag0 * imag1; - out[outIdx + 1] = real0 * imag1 + imag0 * real1; - } - } - else if (channels1 == 1) - // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { - for (size_t ch = 0; ch < channels0 * batch; ++ch) { - int b = ch / channels0; - for (int i = 0; i < spatialSize; ++i) { - int outIdx = (ch * spatialSize + i) * 2; - int inpIdx = (b * spatialSize + i) * 2; - float real0 = inp0[outIdx]; - float imag0 = inp0[outIdx + 1]; - float real1 = inp1[inpIdx]; - float imag1 = inp1[inpIdx + 1]; - out[outIdx] = real0 * real1 - imag0 * imag1; - out[outIdx + 1] = real0 * imag1 + imag0 * real1; - } - } - else - IE_THROW() << "Wrong number of channels for second input!"; - - return true; -} - -bool ComplexMultiplication::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp deleted file mode 100644 index 9dd487f3a0a..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include <openvino/op/op.hpp> -//! [op:common_include] - -//! [op:header] -namespace TemplateExtension { - -class ComplexMultiplication : public ov::op::Op { -public: - OPENVINO_OP("ComplexMultiplication"); - - ComplexMultiplication() = default; - ComplexMultiplication(const ov::Output<ov::Node>& inp0, - const ov::Output<ov::Node>& inp1); - void validate_and_infer_types() override; - std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//!
[op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp deleted file mode 100644 index 86c8185097f..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "fft.hpp" - -#include -#include
-#include - -using namespace TemplateExtension; - -std::unique_ptr so; -using cvCreateMatHeaderF = CvMat*(int, int, int); -using cvSetDataF = void(CvArr*, void*, int); -using cvReleaseMatF = void(CvMat**); -using cvDftF = void(const CvArr*, CvArr*, int, int); -using cvScaleF = void(const CvArr*, CvArr*, double, double); -using cvCloneMatF = CvMat*(const CvMat*); -using cvCopyF = void(const CvArr*, const CvArr*, const CvArr*); -using cvInitMatHeaderF = CvMat*(CvMat*, int, int, int, void*, int); -using cvGetRawDataF = void(const CvArr*, uchar**, int* step, CvSize* roi_size); -using cvReshapeF = CvMat*(const CvArr*, CvMat*, int, int); -using cvCreateDataF = void(CvArr*); -using cvReleaseDataF = void(CvArr*); - -bool loadOpenCV() { - static bool loaded = false; - if (!loaded) { - loaded = true; - try { -#ifdef _WIN32 - so.reset(new InferenceEngine::details::SharedObjectLoader("opencv_core.dll")); -#elif defined(__APPLE__) - so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.dylib")); -#else - so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.so")); -#endif - } catch (InferenceEngine::details::InferenceEngineException& ex) { - return false; - } - } - return loaded; -} - -void fftshift(CvMat* src, bool inverse) { - static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); - static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); - static auto cvInitMatHeader = reinterpret_cast(so->get_symbol("cvInitMatHeader")); - static auto cvGetRawData = reinterpret_cast(so->get_symbol("cvGetRawData")); - static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); - - - // tl | tr br | bl - // ---+--- -> ---+--- - // bl | br tr | tl - - float* data; - int step; - CvSize size; - cvGetRawData(src, (uchar**)&data, &step, &size); - - int height = size.height; - int width = size.width; - int h2 = height / 2; - int w2 = width / 2; - - if (height % 2 || width % 2) { - // Swap rows. - CvMat* srcTop = new CvMat(); - CvMat* srcBot = new CvMat(); - CvMat* dstTop = new CvMat(); - CvMat* dstBot = new CvMat(); - int topH = inverse ? h2 : (h2 + height % 2); - int botH = height - topH; - cvInitMatHeader(srcTop, topH, width, CV_32FC2, data, step); - cvInitMatHeader(srcBot, botH, width, CV_32FC2, data + topH * width * 2, step); - cvInitMatHeader(dstTop, topH, width, CV_32FC2, data + botH * width * 2, step); - cvInitMatHeader(dstBot, botH, width, CV_32FC2, data, step); - - CvMat* tmp = cvCloneMat(srcTop); - cvCopy(srcBot, dstBot, 0); - cvCopy(tmp, dstTop, 0); - - cvReleaseMat(&tmp); - delete srcTop; - delete srcBot; - delete dstTop; - delete dstBot; - - // Swap columns. - CvMat* srcL = new CvMat(); - CvMat* srcR = new CvMat(); - CvMat* dstL = new CvMat(); - CvMat* dstR = new CvMat(); - int leftW = inverse ? 
w2 : (w2 + width % 2); - int rightW = width - leftW; - - cvInitMatHeader(srcL, height, leftW, CV_32FC2, data, step); - cvInitMatHeader(srcR, height, rightW, CV_32FC2, data + leftW * 2, step); - cvInitMatHeader(dstL, height, leftW, CV_32FC2, data + rightW * 2, step); - cvInitMatHeader(dstR, height, rightW, CV_32FC2, data, step); - - tmp = cvCloneMat(srcL); - cvCopy(srcR, dstR, 0); - cvCopy(tmp, dstL, 0); - - cvReleaseMat(&tmp); - delete srcL; - delete srcR; - delete dstL; - delete dstR; - - return; - } - - CvMat* tl = new CvMat(); - CvMat* tr = new CvMat(); - CvMat* bl = new CvMat(); - CvMat* br = new CvMat(); - - cvInitMatHeader(tl, h2, w2, CV_32FC2, data, step); - cvInitMatHeader(tr, h2, w2, CV_32FC2, data + width, step); - cvInitMatHeader(bl, h2, w2, CV_32FC2, data + height * width, step); - cvInitMatHeader(br, h2, w2, CV_32FC2, data + height * width + width, step); - - CvArr* mask = 0; - CvMat* tmp = cvCloneMat(tl); - cvCopy(br, tl, mask); - cvCopy(tmp, br, mask); - - cvCopy(tr, tmp, mask); - cvCopy(bl, tr, mask); - cvCopy(tmp, bl, mask); - - cvReleaseMat(&tmp); - - delete tl; - delete tr; - delete bl; - delete br; -} - -//! [op:ctor] -FFT::FFT(const ov::Output& inp, - const ov::Output& dims, - bool inverse, - bool centered) : Op({inp, dims}) { - loadOpenCV(); - constructor_validate_and_infer_types(); - this->inverse = inverse; - this->centered = centered; -} -//! [op:ctor] - -//! [op:validate] -void FFT::validate_and_infer_types() { - auto outShape = get_input_partial_shape(0); - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr FFT::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0), new_args.at(1), inverse, centered); -} -//! [op:copy] - -//! [op:visit_attributes] -bool FFT::visit_attributes(ov::AttributeVisitor& visitor) { - int inverse_i = static_cast(inverse); - int centered_i = static_cast(centered); - visitor.on_attribute("inverse", inverse_i); - visitor.on_attribute("centered", centered_i); - inverse = static_cast(inverse_i); - centered = static_cast(centered_i); - return true; -} -//! [op:visit_attributes] - -//! 
[op:evaluate] -bool FFT::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - static auto cvSetData = reinterpret_cast(so->get_symbol("cvSetData")); - static auto cvCreateMatHeader = reinterpret_cast(so->get_symbol("cvCreateMatHeader")); - static auto cvDFT = reinterpret_cast(so->get_symbol("cvDFT")); - static auto cvScale = reinterpret_cast(so->get_symbol("cvConvertScale")); - static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); - static auto cvReshape = reinterpret_cast(so->get_symbol("cvReshape")); - static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); - static auto cvCreateData = reinterpret_cast(so->get_symbol("cvCreateData")); - static auto cvReleaseData = reinterpret_cast(so->get_symbol("cvReleaseData")); - static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); - - float* inpData = reinterpret_cast(inputs[0].data()); - - if (inputs[1].get_element_type() != ov::element::i32) - IE_THROW() << "Unexpected dims type: " << inputs[1].get_element_type(); - - int32_t* signalDimsData = reinterpret_cast(inputs[1].data()); - float* outData = reinterpret_cast(outputs[0].data()); - std::vector dims = inputs[0].get_shape(); - const size_t numSignalDims = inputs[1].get_shape()[0]; - - if (!(dims.size() == 3 && (numSignalDims == 1 && signalDimsData[0] == 1) || - dims.size() == 4 && ((numSignalDims == 1 && signalDimsData[0] == 1) || - (numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2)) || - dims.size() == 5 && ((numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) || - (numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3)))) { - std::ostringstream ss; - for (size_t i = 0; i < numSignalDims; ++i) - ss << signalDimsData[i] << " "; - IE_THROW() << "Unsupported configuration: Input dims " << dims.size() << " and signal dims " << ss.str(); - } - - const int batch = dims[0]; - - if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { - const int channels = dims[1]; - int rows = dims[2]; - int cols = dims[3]; - const int planeSize = channels * rows * cols; - // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { - for (size_t d = 0; d < batch * cols; ++d) { - int b = d / cols; - int col = d % cols; - // Copy a slice from input - CvMat* inpSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); - CvMat* outSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); - cvSetData(inpSlice, reinterpret_cast(inpData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); - cvSetData(outSlice, reinterpret_cast(outData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); - - CvMat* inp_col = cvCloneMat(inpSlice); - - CvMat inp_header, *inp; - inp = cvReshape(inp_col, &inp_header, 2, channels); - - CvMat* out = cvCreateMatHeader(channels, rows, CV_32FC2); - cvCreateData(out); - - if (centered) - fftshift(inp, true); - - if (inverse) - cvDFT(inp, out, CV_DXT_INVERSE, 0); - else - cvDFT(inp, out, CV_DXT_FORWARD, 0); - cvScale(out, out, 1.0 / sqrtf(channels * rows), 0); - - if (centered) - fftshift(out, false); - - CvMat out_col_header, *out_col; - out_col = cvReshape(out, &out_col_header, 2, channels * rows); - - cvCopy(out_col, outSlice, 0); - - cvReleaseData(inp_col); - cvReleaseMat(&inp_col); - - cvReleaseData(out); - cvReleaseMat(&out); - - cvReleaseMat(&inpSlice); - cvReleaseMat(&outSlice); - } - } else if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3) { - const int channels = 
dims[1]; - int rows = dims[2]; - int cols = dims[3]; - int planeSize = rows * cols * 2; // 2 is last dimension size - // InferenceEngine::parallel_for(batch * channels, [&](size_t d) { - for (size_t d = 0; d < batch * channels; ++d) { - CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); - CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); - cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); - cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); - - if (centered) - fftshift(inp, true); - - if (inverse) - cvDFT(inp, out, CV_DXT_INVERSE, 0); - else - cvDFT(inp, out, CV_DXT_FORWARD, 0); - cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); - - if (centered) - fftshift(out, false); - - cvReleaseMat(&inp); - cvReleaseMat(&out); - } - } else if (dims.size() == 4 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { - int rows = dims[1]; - int cols = dims[2]; - int planeSize = rows * cols * 2; // 2 is last dimension size - // InferenceEngine::parallel_for(batch, [&](size_t d) { - for (size_t d = 0; d < batch; ++d) { - CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); - CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); - cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); - cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); - - if (centered) - fftshift(inp, true); - - if (inverse) - cvDFT(inp, out, CV_DXT_INVERSE, 0); - else - cvDFT(inp, out, CV_DXT_FORWARD, 0); - cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); - - if (centered) - fftshift(out, false); - - cvReleaseMat(&inp); - cvReleaseMat(&out); - } - } else if (dims.size() == 4 && numSignalDims == 1 && signalDimsData[0] == 1) { - int rows = dims[1]; - int cols = dims[2]; - - const int planeSize = rows; - // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { - for (size_t d = 0; d < batch * cols; ++d) { - int b = d / cols; - int col = d % cols; - CvMat* inp = cvCreateMatHeader(rows, 1, CV_32FC2); - CvMat* out = cvCreateMatHeader(rows, 1, CV_32FC2); - cvSetData(inp, reinterpret_cast(inpData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); - cvSetData(out, reinterpret_cast(outData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); - - if (centered) - fftshift(inp, true); - - if (inverse) - cvDFT(inp, out, CV_DXT_INVERSE, 0); - else - cvDFT(inp, out, CV_DXT_FORWARD, 0); - cvScale(out, out, 1.0 / sqrtf(rows), 0); - - if (centered) - fftshift(out, false); - - cvReleaseMat(&inp); - cvReleaseMat(&out); - } - } else if (dims.size() == 3) { - int rows = dims[0]; - int cols = dims[1]; - CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); - CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); - cvSetData(inp, reinterpret_cast(inpData), cols * 2 * sizeof(float)); - cvSetData(out, reinterpret_cast(outData), cols * 2 * sizeof(float)); - - if (inverse) - cvDFT(inp, out, CV_DXT_INVERSE | CV_DXT_ROWS, 0); - else - cvDFT(inp, out, CV_DXT_FORWARD | CV_DXT_ROWS, 0); - cvScale(out, out, 1.0 / sqrtf(cols), 0); - - cvReleaseMat(&inp); - cvReleaseMat(&out); - } - return true; -} - -bool FFT::has_evaluate() const { - return true; -} -//! 
[op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp deleted file mode 100644 index f541f91c66d..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include -//! [op:common_include] - -//! [op:header] -namespace TemplateExtension { - -class FFT : public ov::op::Op { -public: - OPENVINO_OP("FFT"); - - FFT() = default; - FFT(const ov::Output& inp, - const ov::Output& dims, - bool inverse, - bool centered); - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; - -private: - bool inverse = false; - bool centered = false; -}; -//! [op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp deleted file mode 100644 index 94fa44336f2..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "grid_sample.hpp" - -using namespace TemplateExtension; - -//! [op:ctor] -GridSample::GridSample(const ov::Output& inp, - const ov::Output& grid) : Op({inp, grid}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void GridSample::validate_and_infer_types() { - auto outShape = get_input_partial_shape(0); // NC - // Grid input has a shape NxHxWx2 - auto gridShape = get_input_partial_shape(1); - outShape[2] = gridShape[1]; // H - outShape[3] = gridShape[2]; // W - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr GridSample::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0), new_args.at(1)); -} -//! [op:copy] - -//! [op:visit_attributes] -bool GridSample::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! 
[op:evaluate] -bool GridSample::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* inpData = reinterpret_cast(inputs[0].data()); - const float* gridData = reinterpret_cast(inputs[1].data()); - float* outData = reinterpret_cast(outputs[0].data()); - - std::vector inpDims = inputs[0].get_shape(); - std::vector outDims = outputs[0].get_shape(); - - const int batch = outDims[0]; - const int channels = outDims[1]; - const int height = outDims[2]; - const int width = outDims[3]; - const int inpHeight = inpDims[2]; - const int inpWidth = inpDims[3]; - const int inpPlane = inpHeight * inpWidth; - const int outPlane = height * width; - - std::vector zerosPlane(inpDims[1] * inpDims[2] * inpDims[3], 0); - float* zeros = zerosPlane.data(); - - // InferenceEngine::parallel_for(batch, [&](int d) { - for (int d = 0; d < batch; ++d) { - const float* inp = inpData + d * channels * inpPlane; - const float* grid = gridData + d * outPlane * 2; - for (int y = 0; y < height; ++y) { - for (int x = 0; x < width; ++x) { - int offset = y * width + x; - - float input_x = 0.5f * (grid[offset * 2] + 1) * (inpWidth - 1); - int x0 = std::floor(input_x); - int x1 = x0 + 1; - - float input_y = 0.5f * (grid[offset * 2 + 1] + 1) * (inpHeight - 1); - int y0 = std::floor(input_y); - int y1 = y0 + 1; - - const float* inp_row0 = (0 <= y0 && y0 < inpHeight) ? inp + y0 * inpWidth : zeros; - const float* inp_row1 = (0 <= y1 && y1 < inpHeight) ? inp + y1 * inpWidth : zeros; - float* out = outData + d * channels * outPlane; - if ((x1 < 0 || inpWidth <= x1) && (x0 < 0 || inpWidth <= x0)) { - for (int c = 0; c < channels; ++c) { - out[offset] = 0; - out += outPlane; - } - } - else if (x1 < 0 || inpWidth <= x1) { - for (int c = 0; c < channels; ++c) { - out[offset] = inp_row0[x0] + - (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + - (input_x - x0) * (-inp_row0[x0] + - (input_y - y0) * (inp_row0[x0] - inp_row1[x0])); - out += outPlane; - inp_row0 += inpPlane; - inp_row1 += inpPlane; - } - } - else if (x0 < 0 || inpWidth <= x0) { - for (int c = 0; c < channels; ++c) { - out[offset] = - (input_x - x0) * (inp_row0[x1] + (input_y - y0) * (inp_row1[x1] - inp_row0[x1])); - out += outPlane; - inp_row0 += inpPlane; - inp_row1 += inpPlane; - } - } else { - for (int c = 0; c < channels; ++c) { - out[offset] = inp_row0[x0] + - (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + - (input_x - x0) * (inp_row0[x1] - inp_row0[x0] + - (input_y - y0) * (inp_row1[x1] - inp_row0[x1] - inp_row1[x0] + inp_row0[x0])); - out += outPlane; - inp_row0 += inpPlane; - inp_row1 += inpPlane; - } - } - } - } - } - return true; -} - -bool GridSample::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp deleted file mode 100644 index be259717045..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include -//! [op:common_include] - -//! 
[op:header] -namespace TemplateExtension { - -class GridSample : public ov::op::Op { -public: - OPENVINO_OP("GridSample"); - - GridSample() = default; - GridSample(const ov::Output& inp, - const ov::Output& grid); - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//! [op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp deleted file mode 100644 index e048e5f08cd..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "unpool.hpp" -#include "sparse_conv.hpp" -#include "sparse_conv_transpose.hpp" -#include "complex_mul.hpp" -#include "calculate_grid.hpp" -#include "grid_sample.hpp" -#include "fft.hpp" - -// clang-format off -//! [ov_extension:entry_point] -OPENVINO_CREATE_EXTENSIONS( - std::vector({ - // Register operation itself, required to be read from IR - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - - // Register operaton mapping, required when converted from framework model format - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>(), - std::make_shared>() - })); -//! [ov_extension:entry_point] -// clang-format on diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp deleted file mode 100644 index dd561626c43..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "sparse_conv.hpp" - -using namespace TemplateExtension; - -//! [op:ctor] -SparseConv::SparseConv(const ov::Output& features, - const ov::Output& inp_pos, - const ov::Output& out_pos, - const ov::Output& kernel, - const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void SparseConv::validate_and_infer_types() { - auto outShape = get_input_partial_shape(2); - auto kernelShape = get_input_partial_shape(3); - outShape[1] = kernelShape[4]; - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr SparseConv::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); -} -//! [op:copy] - -//! 
[op:visit_attributes] -bool SparseConv::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! [op:evaluate] -bool SparseConv::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* features = reinterpret_cast(inputs[0].data()); - const float* inpPos = reinterpret_cast(inputs[1].data()); - const float* outPos = reinterpret_cast(inputs[2].data()); - const float* kernel = reinterpret_cast(inputs[3].data()); - const float* offset = reinterpret_cast(inputs[4].data()); - float* out = reinterpret_cast(outputs[0].data()); - memset(out, 0, outputs[0].get_byte_size()); - - size_t numInpPoints = inputs[1].get_shape()[0]; - const size_t numOutPoints = inputs[2].get_shape()[0]; - std::vector kernelDims = inputs[3].get_shape(); - - // Kernel layout is DxHxWxICxOH - const int kd = kernelDims[0]; - const int kh = kernelDims[1]; - const int kw = kernelDims[2]; - const int IC = kernelDims[3]; - const int OC = kernelDims[4]; - - // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py - float rw = kw * 0.51f; - float rh = kh * 0.51f; - float rd = kd * 0.51f; - - for (size_t i = 0; i < numInpPoints; ++i) { - if (inpPos[i * 3] < 0) { - numInpPoints = i; - break; - } - } - - for (size_t i = 0; i < numOutPoints; ++i) { - const float xi = outPos[i * 3] - offset[0]; - const float yi = outPos[i * 3 + 1] - offset[1]; - const float zi = outPos[i * 3 + 2] - offset[2]; - - // Accumulate features which inside the kernel - for (size_t j = 0; j < numInpPoints; ++j) { - const float xj = inpPos[j * 3]; - const float yj = inpPos[j * 3 + 1]; - const float zj = inpPos[j * 3 + 2]; - - if (xi - rw <= xj && xj <= xi + rw && - yi - rh <= yj && yj <= yi + rh && - zi - rd <= zj && zj <= zi + rd) { - - const int w = std::min(static_cast(xj - xi + kw * 0.5f), kw - 1); - const int h = std::min(static_cast(yj - yi + kh * 0.5f), kh - 1); - const int d = std::min(static_cast(zj - zi + kd * 0.5f), kd - 1); - - const float* featuresOffset = features + j * IC; - for (size_t ic = 0; ic < IC; ++ic) { - const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d))); - for (size_t oc = 0; oc < OC; ++oc) { - out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic]; - } - } - } - } - } - return true; -} - -bool SparseConv::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp deleted file mode 100644 index 8d508e725aa..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include -//! [op:common_include] - -//! 
[op:header] -namespace TemplateExtension { - -class SparseConv : public ov::op::Op { -public: - OPENVINO_OP("SparseConv"); - - SparseConv() = default; - SparseConv(const ov::Output& features, - const ov::Output& inp_pos, - const ov::Output& out_pos, - const ov::Output& kernel, - const ov::Output& offset); - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//! [op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp deleted file mode 100644 index dfd8d525116..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "sparse_conv_transpose.hpp" - -using namespace TemplateExtension; - -//! [op:ctor] -SparseConvTranspose::SparseConvTranspose(const ov::Output& features, - const ov::Output& inp_pos, - const ov::Output& out_pos, - const ov::Output& kernel, - const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void SparseConvTranspose::validate_and_infer_types() { - auto outShape = get_input_partial_shape(2); - auto kernelShape = get_input_partial_shape(3); - outShape[1] = kernelShape[4]; - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr SparseConvTranspose::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); -} -//! [op:copy] - -//! [op:visit_attributes] -bool SparseConvTranspose::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! 
[op:evaluate] -bool SparseConvTranspose::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* features = reinterpret_cast(inputs[0].data()); - const float* inpPos = reinterpret_cast(inputs[1].data()); - const float* outPos = reinterpret_cast(inputs[2].data()); - const float* kernel = reinterpret_cast(inputs[3].data()); - const float* offset = reinterpret_cast(inputs[4].data()); - float* out = reinterpret_cast(outputs[0].data()); - memset(out, 0, outputs[0].get_byte_size()); - - size_t numInpPoints = inputs[1].get_shape()[0]; - const size_t numOutPoints = inputs[2].get_shape()[0]; - std::vector kernelDims = inputs[3].get_shape(); - - // Kernel layout is DxHxWxICxOH - const int kd = kernelDims[0]; - const int kh = kernelDims[1]; - const int kw = kernelDims[2]; - const int IC = kernelDims[3]; - const int OC = kernelDims[4]; - - // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py - float rw = kw * 0.51f; - float rh = kh * 0.51f; - float rd = kd * 0.51f; - - for (size_t i = 0; i < numInpPoints; ++i) { - if (inpPos[i * 3] < 0) { - numInpPoints = i; - break; - } - } - - for (size_t i = 0; i < numOutPoints; ++i) { - const float xi = outPos[i * 3] - offset[0]; - const float yi = outPos[i * 3 + 1] - offset[1]; - const float zi = outPos[i * 3 + 2] - offset[2]; - - // Accumulate features which inside the kernel - for (size_t j = 0; j < numInpPoints; ++j) { - const float xj = inpPos[j * 3]; - const float yj = inpPos[j * 3 + 1]; - const float zj = inpPos[j * 3 + 2]; - - if (xi - rw <= xj && xj <= xi + rw && - yi - rh <= yj && yj <= yi + rh && - zi - rd <= zj && zj <= zi + rd) { - - const int w = kw - 1 - std::min(static_cast(xj - xi + kw * 0.5f), kw - 1); - const int h = kh - 1 - std::min(static_cast(yj - yi + kh * 0.5f), kh - 1); - const int d = kd - 1 - std::min(static_cast(zj - zi + kd * 0.5f), kd - 1); - - const float* featuresOffset = features + j * IC; - for (size_t ic = 0; ic < IC; ++ic) { - const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d))); - for (size_t oc = 0; oc < OC; ++oc) { - out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic]; - } - } - } - } - } - return true; -} - -bool SparseConvTranspose::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp deleted file mode 100644 index f607d5462df..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include -//! [op:common_include] - -//! 
[op:header] -namespace TemplateExtension { - -class SparseConvTranspose : public ov::op::Op { -public: - OPENVINO_OP("SparseConvTranspose"); - - SparseConvTranspose() = default; - SparseConvTranspose(const ov::Output& features, - const ov::Output& inp_pos, - const ov::Output& out_pos, - const ov::Output& kernel, - const ov::Output& offset); - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//! [op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp deleted file mode 100644 index caae15055fb..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "unpool.hpp" -// #include - -using namespace TemplateExtension; - -//! [op:ctor] -Unpool::Unpool(const ov::Output& poolInp, - const ov::Output& poolOut, - const ov::Output& inp, - const ov::Output& shape) : Op({poolInp, poolOut, inp, shape}) { - constructor_validate_and_infer_types(); -} -//! [op:ctor] - -//! [op:validate] -void Unpool::validate_and_infer_types() { - auto outShape = get_input_partial_shape(3); - auto poolInpShape = get_input_partial_shape(0).to_shape(); - outShape[0] = poolInpShape[0]; // Use only spatial dimensions from shape - outShape[1] = poolInpShape[1]; // and restore batch and channels - set_output_type(0, get_input_element_type(0), outShape); -} -//! [op:validate] - -//! [op:copy] -std::shared_ptr Unpool::clone_with_new_inputs(const ov::OutputVector& new_args) const { - OPENVINO_ASSERT(new_args.size() == 4, "Incorrect number of new arguments"); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); -} -//! [op:copy] - -//! [op:visit_attributes] -bool Unpool::visit_attributes(ov::AttributeVisitor& visitor) { - return true; -} -//! [op:visit_attributes] - -//! 
[op:evaluate] -bool Unpool::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { - const float* poolInp = reinterpret_cast<const float*>(inputs[0].data()); - const float* poolOut = reinterpret_cast<const float*>(inputs[1].data()); - const float* inp = reinterpret_cast<const float*>(inputs[2].data()); - float* out = reinterpret_cast<float*>(outputs[0].data()); - - std::vector<size_t> poolInpDims = inputs[0].get_shape(); - std::vector<size_t> poolOutDims = inputs[1].get_shape(); - std::vector<size_t> inpDims = inputs[2].get_shape(); - std::vector<size_t> outDims = outputs[0].get_shape(); - - const size_t batch = poolInpDims[0]; - const size_t channels = poolInpDims[1]; - const size_t height = poolInpDims[2]; - const size_t width = poolInpDims[3]; - const size_t outHeight = outDims[2]; - const size_t outWidth = outDims[3]; - const size_t poolOutHeight = poolOutDims[2]; - const size_t poolOutWidth = poolOutDims[3]; - - std::vector<bool> mask(inputs[1].get_size(), false); - - memset(out, 0, outputs[0].get_byte_size()); - // InferenceEngine::parallel_for(batch*channels, [&](size_t d) { - for (size_t d = 0; d < batch * channels; ++d) { - for (int y = 0; y < height; ++y) { - for (int x = 0; x < width; ++x) { - int poolOutIdx = (d * poolOutHeight + y / 2) * poolOutWidth + x / 2; - int poolInpIdx = (d * height + y) * width + x; - int dstIdx = d * outHeight * outWidth + (y * width + x); - if (fabs(poolInp[poolInpIdx] - poolOut[poolOutIdx]) < 1e-5f && !mask[poolOutIdx]) { - out[dstIdx] = inp[poolOutIdx]; - mask[poolOutIdx] = true; - } - } - } - } - return true; -} - -bool Unpool::has_evaluate() const { - return true; -} -//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp deleted file mode 100644 index 275a80c23f7..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -//! [op:common_include] -#include <openvino/op/op.hpp> -//! [op:common_include] -//! [op:frontend_include] -#ifdef OPENVINO_ONNX_FRONTEND_ENABLED -# include -#endif -//! [op:frontend_include] - -//! [op:header] -namespace TemplateExtension { - -class Unpool : public ov::op::Op { -public: - OPENVINO_OP("MaxPoolGrad"); - -#ifdef OPENVINO_ONNX_FRONTEND_ENABLED - OPENVINO_FRAMEWORK_MAP(onnx, "MaxPoolGrad") -#endif - - Unpool() = default; - Unpool(const ov::Output<ov::Node>& poolInp, - const ov::Output<ov::Node>& poolOut, - const ov::Output<ov::Node>& inp, - const ov::Output<ov::Node>& shape); - void validate_and_infer_types() override; - std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& new_args) const override; - bool visit_attributes(ov::AttributeVisitor& visitor) override; - - bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; - bool has_evaluate() const override; -}; -//!
[op:header] - -} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb deleted file mode 100644 index 22a39b40125..00000000000 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/scrap.ipynb +++ /dev/null @@ -1,589 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "ename": "ImportError", - "evalue": "attempted relative import with no known parent package", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\n\u001b[1;32m 2\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mnn\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mnn\u001b[39;00m\n\u001b[0;32m----> 3\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mmodels\u001b[39;00m \u001b[39mimport\u001b[39;00m SUMNet\n\u001b[1;32m 5\u001b[0m n \u001b[39m=\u001b[39m SUMNet()\n\u001b[1;32m 6\u001b[0m example_weight \u001b[39m=\u001b[39m torch\u001b[39m.\u001b[39mrand(\u001b[39m1\u001b[39m, \u001b[39m1\u001b[39m, \u001b[39m3\u001b[39m, \u001b[39m3\u001b[39m)\n", - "File \u001b[0;32m~/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py:5\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mtorch\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mnn\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mfunctional\u001b[39;00m \u001b[39mas\u001b[39;00m \u001b[39mF\u001b[39;00m\n\u001b[1;32m 4\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtorchvision\u001b[39;00m \u001b[39mimport\u001b[39;00m models\n\u001b[0;32m----> 5\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39m.\u001b[39;00m\u001b[39mmax_unpool_2d\u001b[39;00m \u001b[39mimport\u001b[39;00m MaxUnpool2d\n\u001b[1;32m 7\u001b[0m \u001b[39mclass\u001b[39;00m \u001b[39mSUMNet\u001b[39;00m(nn\u001b[39m.\u001b[39mModule):\n\u001b[1;32m 8\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__init__\u001b[39m(\u001b[39mself\u001b[39m,in_ch,out_ch):\n", - "\u001b[0;31mImportError\u001b[0m: attempted relative import with no known parent package" - ] - } - ], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "from models import SUMNet\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection\n" - ] - } - ], - "source": [ - "cd .." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "from src.utils.models import SUMNet" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/deeptensor/anaconda3/envs/openv/lib/python3.9/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n", - " warnings.warn(\n", - "/home/deeptensor/anaconda3/envs/openv/lib/python3.9/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=VGG11_BN_Weights.IMAGENET1K_V1`. You can also use `weights=VGG11_BN_Weights.DEFAULT` to get the most up-to-date weights.\n", - " warnings.warn(msg)\n" - ] - } - ], - "source": [ - "n = SUMNet(in_ch=1,out_ch=2)\n", - "weights = torch.load(\"/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/downloads/model_weights/stage1/sumnet_adv_best_lungs.pt\")\n", - "n.load_state_dict(weights)\n", - "# example_weight = torch.rand(1, 1, 3, 3)\n", - "example_forward_input = torch.rand(2, 1, 64, 64)\n", - "\n", - "# Trace a specific method and construct `ScriptModule` with\n", - "# a single `forward` method\n", - "module = torch.jit.trace(n.forward, example_forward_input)\n", - "\n", - "# Trace a module (implicitly traces `forward`) and construct a\n", - "# `ScriptModule` with a single `forward` method\n", - "module = torch.jit.trace(n, example_forward_input)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "SUMNet(\n", - " original_name=SUMNet\n", - " (encoder): Sequential(\n", - " original_name=Sequential\n", - " (0): Conv2d(original_name=Conv2d)\n", - " (1): BatchNorm2d(original_name=BatchNorm2d)\n", - " (2): ReLU(original_name=ReLU)\n", - " (3): MaxPool2d(original_name=MaxPool2d)\n", - " (4): Conv2d(original_name=Conv2d)\n", - " (5): BatchNorm2d(original_name=BatchNorm2d)\n", - " (6): ReLU(original_name=ReLU)\n", - " (7): MaxPool2d(original_name=MaxPool2d)\n", - " (8): Conv2d(original_name=Conv2d)\n", - " (9): BatchNorm2d(original_name=BatchNorm2d)\n", - " (10): ReLU(original_name=ReLU)\n", - " (11): Conv2d(original_name=Conv2d)\n", - " (12): BatchNorm2d(original_name=BatchNorm2d)\n", - " (13): ReLU(original_name=ReLU)\n", - " (14): MaxPool2d(original_name=MaxPool2d)\n", - " (15): Conv2d(original_name=Conv2d)\n", - " (16): BatchNorm2d(original_name=BatchNorm2d)\n", - " (17): ReLU(original_name=ReLU)\n", - " (18): Conv2d(original_name=Conv2d)\n", - " (19): BatchNorm2d(original_name=BatchNorm2d)\n", - " (20): ReLU(original_name=ReLU)\n", - " (21): MaxPool2d(original_name=MaxPool2d)\n", - " (22): Conv2d(original_name=Conv2d)\n", - " (23): BatchNorm2d(original_name=BatchNorm2d)\n", - " (24): ReLU(original_name=ReLU)\n", - " (25): Conv2d(original_name=Conv2d)\n", - " (26): BatchNorm2d(original_name=BatchNorm2d)\n", - " (27): ReLU(original_name=ReLU)\n", - " (28): MaxPool2d(original_name=MaxPool2d)\n", - " )\n", - " (preconv): Conv2d(original_name=Conv2d)\n", - " (conv1): Conv2d(original_name=Conv2d)\n", - " (bn1): BatchNorm2d(original_name=BatchNorm2d)\n", - " (pool1): 
MaxPool2d(original_name=MaxPool2d)\n", - " (conv2): Conv2d(original_name=Conv2d)\n", - " (bn2): BatchNorm2d(original_name=BatchNorm2d)\n", - " (pool2): MaxPool2d(original_name=MaxPool2d)\n", - " (conv3a): Conv2d(original_name=Conv2d)\n", - " (bn3): BatchNorm2d(original_name=BatchNorm2d)\n", - " (conv3b): Conv2d(original_name=Conv2d)\n", - " (bn4): BatchNorm2d(original_name=BatchNorm2d)\n", - " (pool3): MaxPool2d(original_name=MaxPool2d)\n", - " (conv4a): Conv2d(original_name=Conv2d)\n", - " (bn5): BatchNorm2d(original_name=BatchNorm2d)\n", - " (conv4b): Conv2d(original_name=Conv2d)\n", - " (bn6): BatchNorm2d(original_name=BatchNorm2d)\n", - " (pool4): MaxPool2d(original_name=MaxPool2d)\n", - " (conv5a): Conv2d(original_name=Conv2d)\n", - " (bn7): BatchNorm2d(original_name=BatchNorm2d)\n", - " (conv5b): Conv2d(original_name=Conv2d)\n", - " (bn8): BatchNorm2d(original_name=BatchNorm2d)\n", - " (pool5): MaxPool2d(original_name=MaxPool2d)\n", - " (unpool5): MaxUnpool2d(original_name=MaxUnpool2d)\n", - " (donv5b): Conv2d(original_name=Conv2d)\n", - " (donv5a): Conv2d(original_name=Conv2d)\n", - " (unpool4): MaxUnpool2d(original_name=MaxUnpool2d)\n", - " (donv4b): Conv2d(original_name=Conv2d)\n", - " (donv4a): Conv2d(original_name=Conv2d)\n", - " (unpool3): MaxUnpool2d(original_name=MaxUnpool2d)\n", - " (donv3b): Conv2d(original_name=Conv2d)\n", - " (donv3a): Conv2d(original_name=Conv2d)\n", - " (unpool2): MaxUnpool2d(original_name=MaxUnpool2d)\n", - " (donv2): Conv2d(original_name=Conv2d)\n", - " (unpool1): MaxUnpool2d(original_name=MaxUnpool2d)\n", - " (donv1): Conv2d(original_name=Conv2d)\n", - " (output): Conv2d(original_name=Conv2d)\n", - ")" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "module" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "torch.onnx.export(\n", - " n,\n", - " example_forward_input,\n", - " \"AbsSummarizer.onnx\",\n", - " opset_version=11)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "graph torch_jit (\n", - " %input.1[FLOAT, 2x1x64x64]\n", - ") initializers (\n", - " %preconv.weight[FLOAT, 3x1x1x1]\n", - " %preconv.bias[FLOAT, 3]\n", - " %donv5b.weight[FLOAT, 512x1024x3x3]\n", - " %donv5b.bias[FLOAT, 512]\n", - " %donv5a.weight[FLOAT, 512x512x3x3]\n", - " %donv5a.bias[FLOAT, 512]\n", - " %donv4b.weight[FLOAT, 512x1024x3x3]\n", - " %donv4b.bias[FLOAT, 512]\n", - " %donv4a.weight[FLOAT, 256x512x3x3]\n", - " %donv4a.bias[FLOAT, 256]\n", - " %donv3b.weight[FLOAT, 256x512x3x3]\n", - " %donv3b.bias[FLOAT, 256]\n", - " %donv3a.weight[FLOAT, 128x256x3x3]\n", - " %donv3a.bias[FLOAT, 128]\n", - " %donv2.weight[FLOAT, 64x256x3x3]\n", - " %donv2.bias[FLOAT, 64]\n", - " %donv1.weight[FLOAT, 32x128x3x3]\n", - " %donv1.bias[FLOAT, 32]\n", - " %output.weight[FLOAT, 2x32x1x1]\n", - " %output.bias[FLOAT, 2]\n", - " %onnx::Conv_351[FLOAT, 64x3x3x3]\n", - " %onnx::Conv_352[FLOAT, 64]\n", - " %onnx::Conv_354[FLOAT, 128x64x3x3]\n", - " %onnx::Conv_355[FLOAT, 128]\n", - " %onnx::Conv_357[FLOAT, 256x128x3x3]\n", - " %onnx::Conv_358[FLOAT, 256]\n", - " %onnx::Conv_360[FLOAT, 256x256x3x3]\n", - " %onnx::Conv_361[FLOAT, 256]\n", - " %onnx::Conv_363[FLOAT, 512x256x3x3]\n", - " %onnx::Conv_364[FLOAT, 512]\n", - " %onnx::Conv_366[FLOAT, 512x512x3x3]\n", - " %onnx::Conv_367[FLOAT, 512]\n", - " %onnx::Conv_369[FLOAT, 512x512x3x3]\n", - " %onnx::Conv_370[FLOAT, 
512]\n", - " %onnx::Conv_372[FLOAT, 512x512x3x3]\n", - " %onnx::Conv_373[FLOAT, 512]\n", - ") {\n", - " %onnx::Relu_77 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%input.1, %preconv.weight, %preconv.bias)\n", - " %onnx::Conv_78 = Relu(%onnx::Relu_77)\n", - " %onnx::Relu_350 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_78, %onnx::Conv_351, %onnx::Conv_352)\n", - " %onnx::MaxPool_81 = Relu(%onnx::Relu_350)\n", - " %input.4, %onnx::Sub_83 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_81)\n", - " %84, %onnx::Slice_85 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_81)\n", - " %onnx::Slice_86 = Constant[value = ]()\n", - " %onnx::Slice_87 = Constant[value = ]()\n", - " %onnx::Slice_88 = Constant[value = ]()\n", - " %onnx::Sub_89 = Slice(%onnx::Slice_85, %onnx::Slice_87, %onnx::Slice_88, %onnx::Slice_86)\n", - " %indices = Sub(%onnx::Sub_83, %onnx::Sub_89)\n", - " %onnx::Relu_353 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.4, %onnx::Conv_354, %onnx::Conv_355)\n", - " %onnx::MaxPool_93 = Relu(%onnx::Relu_353)\n", - " %input.12, %onnx::Sub_95 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_93)\n", - " %96, %onnx::Slice_97 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_93)\n", - " %onnx::Slice_98 = Constant[value = ]()\n", - " %onnx::Slice_99 = Constant[value = ]()\n", - " %onnx::Slice_100 = Constant[value = ]()\n", - " %onnx::Sub_101 = Slice(%onnx::Slice_97, %onnx::Slice_99, %onnx::Slice_100, %onnx::Slice_98)\n", - " %indices.3 = Sub(%onnx::Sub_95, %onnx::Sub_101)\n", - " %onnx::Relu_356 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.12, %onnx::Conv_357, %onnx::Conv_358)\n", - " %onnx::Conv_105 = Relu(%onnx::Relu_356)\n", - " %onnx::Relu_359 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_105, %onnx::Conv_360, %onnx::Conv_361)\n", - " %onnx::MaxPool_108 = Relu(%onnx::Relu_359)\n", - " %input.24, %onnx::Sub_110 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_108)\n", - " %111, %onnx::Slice_112 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_108)\n", - " %onnx::Slice_113 = Constant[value = ]()\n", - " %onnx::Slice_114 = Constant[value = ]()\n", - " %onnx::Slice_115 = Constant[value = ]()\n", - " %onnx::Sub_116 = Slice(%onnx::Slice_112, %onnx::Slice_114, %onnx::Slice_115, %onnx::Slice_113)\n", - " %indices.7 = Sub(%onnx::Sub_110, %onnx::Sub_116)\n", - " %onnx::Relu_362 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.24, %onnx::Conv_363, %onnx::Conv_364)\n", - " %onnx::Conv_120 = Relu(%onnx::Relu_362)\n", - " %onnx::Relu_365 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_120, %onnx::Conv_366, %onnx::Conv_367)\n", - " %onnx::MaxPool_123 = Relu(%onnx::Relu_365)\n", - " %input.36, %onnx::Sub_125 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_123)\n", - " %126, %onnx::Slice_127 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_123)\n", - " %onnx::Slice_128 = Constant[value = ]()\n", - " 
%onnx::Slice_129 = Constant[value = ]()\n", - " %onnx::Slice_130 = Constant[value = ]()\n", - " %onnx::Sub_131 = Slice(%onnx::Slice_127, %onnx::Slice_129, %onnx::Slice_130, %onnx::Slice_128)\n", - " %indices.11 = Sub(%onnx::Sub_125, %onnx::Sub_131)\n", - " %onnx::Relu_368 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.36, %onnx::Conv_369, %onnx::Conv_370)\n", - " %onnx::Conv_135 = Relu(%onnx::Relu_368)\n", - " %onnx::Relu_371 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_135, %onnx::Conv_372, %onnx::Conv_373)\n", - " %onnx::MaxPool_138 = Relu(%onnx::Relu_371)\n", - " %input.48, %onnx::Sub_140 = MaxPool[ceil_mode = 0, kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%onnx::MaxPool_138)\n", - " %141, %onnx::Slice_142 = MaxPool[kernel_shape = [1, 1], strides = [1, 1]](%onnx::MaxPool_138)\n", - " %onnx::Slice_143 = Constant[value = ]()\n", - " %onnx::Slice_144 = Constant[value = ]()\n", - " %onnx::Slice_145 = Constant[value = ]()\n", - " %onnx::Sub_146 = Slice(%onnx::Slice_142, %onnx::Slice_144, %onnx::Slice_145, %onnx::Slice_143)\n", - " %indices.15 = Sub(%onnx::Sub_140, %onnx::Sub_146)\n", - " %onnx::Gather_148 = Shape(%input.48)\n", - " %onnx::Gather_149 = Constant[value = ]()\n", - " %onnx::Gather_150 = Constant[value = ]()\n", - " %onnx::Range_151 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_149)\n", - " %onnx::Mul_152 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_150)\n", - " %onnx::Gather_153 = Constant[value = ]()\n", - " %onnx::Sub_154 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_153)\n", - " %onnx::Sub_155 = Constant[value = ]()\n", - " %onnx::Mul_156 = Sub(%onnx::Sub_154, %onnx::Sub_155)\n", - " %onnx::Mul_157 = Constant[value = ]()\n", - " %onnx::Add_158 = Mul(%onnx::Mul_156, %onnx::Mul_157)\n", - " %onnx::Add_159 = Constant[value = ]()\n", - " %onnx::Mul_160 = Add(%onnx::Add_158, %onnx::Add_159)\n", - " %onnx::Gather_161 = Constant[value = ]()\n", - " %onnx::Sub_162 = Gather[axis = 0](%onnx::Gather_148, %onnx::Gather_161)\n", - " %onnx::Sub_163 = Constant[value = ]()\n", - " %onnx::Mul_164 = Sub(%onnx::Sub_162, %onnx::Sub_163)\n", - " %onnx::Mul_165 = Constant[value = ]()\n", - " %onnx::Add_166 = Mul(%onnx::Mul_164, %onnx::Mul_165)\n", - " %onnx::Add_167 = Constant[value = ]()\n", - " %onnx::Mul_168 = Add(%onnx::Add_166, %onnx::Add_167)\n", - " %onnx::Mul_169 = Mul(%onnx::Mul_160, %onnx::Mul_168)\n", - " %onnx::Mul_170 = Mul(%onnx::Mul_169, %onnx::Mul_152)\n", - " %onnx::Reshape_171 = Range(%onnx::Gather_149, %onnx::Mul_152, %onnx::Gather_150)\n", - " %onnx::Reshape_172 = Constant[value = ]()\n", - " %onnx::Mul_173 = Reshape(%onnx::Reshape_171, %onnx::Reshape_172)\n", - " %onnx::Cast_174 = Mul(%onnx::Mul_173, %onnx::Mul_169)\n", - " %onnx::Add_175 = Cast[to = 7](%onnx::Cast_174)\n", - " %onnx::Reshape_176 = Range(%onnx::Gather_149, %onnx::Range_151, %onnx::Gather_150)\n", - " %onnx::Reshape_177 = Constant[value = ]()\n", - " %onnx::Mul_178 = Reshape(%onnx::Reshape_176, %onnx::Reshape_177)\n", - " %onnx::Cast_179 = Mul(%onnx::Mul_178, %onnx::Mul_170)\n", - " %onnx::Add_180 = Cast[to = 7](%onnx::Cast_179)\n", - " %onnx::Add_181 = Add(%indices.15, %onnx::Add_175)\n", - " %onnx::MaxUnpool_182 = Add(%onnx::Add_181, %onnx::Add_180)\n", - " %onnx::Concat_183 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%input.48, %onnx::MaxUnpool_182)\n", - " %input.52 = Concat[axis = 1](%onnx::Concat_183, %onnx::MaxPool_138)\n", - " 
%onnx::Relu_185 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.52, %donv5b.weight, %donv5b.bias)\n", - " %onnx::Conv_186 = Relu(%onnx::Relu_185)\n", - " %onnx::Relu_187 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_186, %donv5a.weight, %donv5a.bias)\n", - " %onnx::Shape_188 = Relu(%onnx::Relu_187)\n", - " %onnx::Gather_189 = Shape(%onnx::Shape_188)\n", - " %onnx::Gather_190 = Constant[value = ]()\n", - " %onnx::Gather_191 = Constant[value = ]()\n", - " %onnx::Range_192 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_190)\n", - " %onnx::Mul_193 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_191)\n", - " %onnx::Gather_194 = Constant[value = ]()\n", - " %onnx::Sub_195 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_194)\n", - " %onnx::Sub_196 = Constant[value = ]()\n", - " %onnx::Mul_197 = Sub(%onnx::Sub_195, %onnx::Sub_196)\n", - " %onnx::Mul_198 = Constant[value = ]()\n", - " %onnx::Add_199 = Mul(%onnx::Mul_197, %onnx::Mul_198)\n", - " %onnx::Add_200 = Constant[value = ]()\n", - " %onnx::Mul_201 = Add(%onnx::Add_199, %onnx::Add_200)\n", - " %onnx::Gather_202 = Constant[value = ]()\n", - " %onnx::Sub_203 = Gather[axis = 0](%onnx::Gather_189, %onnx::Gather_202)\n", - " %onnx::Sub_204 = Constant[value = ]()\n", - " %onnx::Mul_205 = Sub(%onnx::Sub_203, %onnx::Sub_204)\n", - " %onnx::Mul_206 = Constant[value = ]()\n", - " %onnx::Add_207 = Mul(%onnx::Mul_205, %onnx::Mul_206)\n", - " %onnx::Add_208 = Constant[value = ]()\n", - " %onnx::Mul_209 = Add(%onnx::Add_207, %onnx::Add_208)\n", - " %onnx::Mul_210 = Mul(%onnx::Mul_201, %onnx::Mul_209)\n", - " %onnx::Mul_211 = Mul(%onnx::Mul_210, %onnx::Mul_193)\n", - " %onnx::Reshape_212 = Range(%onnx::Gather_190, %onnx::Mul_193, %onnx::Gather_191)\n", - " %onnx::Reshape_213 = Constant[value = ]()\n", - " %onnx::Mul_214 = Reshape(%onnx::Reshape_212, %onnx::Reshape_213)\n", - " %onnx::Cast_215 = Mul(%onnx::Mul_214, %onnx::Mul_210)\n", - " %onnx::Add_216 = Cast[to = 7](%onnx::Cast_215)\n", - " %onnx::Reshape_217 = Range(%onnx::Gather_190, %onnx::Range_192, %onnx::Gather_191)\n", - " %onnx::Reshape_218 = Constant[value = ]()\n", - " %onnx::Mul_219 = Reshape(%onnx::Reshape_217, %onnx::Reshape_218)\n", - " %onnx::Cast_220 = Mul(%onnx::Mul_219, %onnx::Mul_211)\n", - " %onnx::Add_221 = Cast[to = 7](%onnx::Cast_220)\n", - " %onnx::Add_222 = Add(%indices.11, %onnx::Add_216)\n", - " %onnx::MaxUnpool_223 = Add(%onnx::Add_222, %onnx::Add_221)\n", - " %onnx::Concat_224 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_188, %onnx::MaxUnpool_223)\n", - " %input.56 = Concat[axis = 1](%onnx::Concat_224, %onnx::MaxPool_123)\n", - " %onnx::Relu_226 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.56, %donv4b.weight, %donv4b.bias)\n", - " %onnx::Conv_227 = Relu(%onnx::Relu_226)\n", - " %onnx::Relu_228 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_227, %donv4a.weight, %donv4a.bias)\n", - " %onnx::Shape_229 = Relu(%onnx::Relu_228)\n", - " %onnx::Gather_230 = Shape(%onnx::Shape_229)\n", - " %onnx::Gather_231 = Constant[value = ]()\n", - " %onnx::Gather_232 = Constant[value = ]()\n", - " %onnx::Range_233 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_231)\n", - " %onnx::Mul_234 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_232)\n", - " %onnx::Gather_235 = Constant[value = ]()\n", - " 
%onnx::Sub_236 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_235)\n", - " %onnx::Sub_237 = Constant[value = ]()\n", - " %onnx::Mul_238 = Sub(%onnx::Sub_236, %onnx::Sub_237)\n", - " %onnx::Mul_239 = Constant[value = ]()\n", - " %onnx::Add_240 = Mul(%onnx::Mul_238, %onnx::Mul_239)\n", - " %onnx::Add_241 = Constant[value = ]()\n", - " %onnx::Mul_242 = Add(%onnx::Add_240, %onnx::Add_241)\n", - " %onnx::Gather_243 = Constant[value = ]()\n", - " %onnx::Sub_244 = Gather[axis = 0](%onnx::Gather_230, %onnx::Gather_243)\n", - " %onnx::Sub_245 = Constant[value = ]()\n", - " %onnx::Mul_246 = Sub(%onnx::Sub_244, %onnx::Sub_245)\n", - " %onnx::Mul_247 = Constant[value = ]()\n", - " %onnx::Add_248 = Mul(%onnx::Mul_246, %onnx::Mul_247)\n", - " %onnx::Add_249 = Constant[value = ]()\n", - " %onnx::Mul_250 = Add(%onnx::Add_248, %onnx::Add_249)\n", - " %onnx::Mul_251 = Mul(%onnx::Mul_242, %onnx::Mul_250)\n", - " %onnx::Mul_252 = Mul(%onnx::Mul_251, %onnx::Mul_234)\n", - " %onnx::Reshape_253 = Range(%onnx::Gather_231, %onnx::Mul_234, %onnx::Gather_232)\n", - " %onnx::Reshape_254 = Constant[value = ]()\n", - " %onnx::Mul_255 = Reshape(%onnx::Reshape_253, %onnx::Reshape_254)\n", - " %onnx::Cast_256 = Mul(%onnx::Mul_255, %onnx::Mul_251)\n", - " %onnx::Add_257 = Cast[to = 7](%onnx::Cast_256)\n", - " %onnx::Reshape_258 = Range(%onnx::Gather_231, %onnx::Range_233, %onnx::Gather_232)\n", - " %onnx::Reshape_259 = Constant[value = ]()\n", - " %onnx::Mul_260 = Reshape(%onnx::Reshape_258, %onnx::Reshape_259)\n", - " %onnx::Cast_261 = Mul(%onnx::Mul_260, %onnx::Mul_252)\n", - " %onnx::Add_262 = Cast[to = 7](%onnx::Cast_261)\n", - " %onnx::Add_263 = Add(%indices.7, %onnx::Add_257)\n", - " %onnx::MaxUnpool_264 = Add(%onnx::Add_263, %onnx::Add_262)\n", - " %onnx::Concat_265 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_229, %onnx::MaxUnpool_264)\n", - " %input.60 = Concat[axis = 1](%onnx::Concat_265, %onnx::MaxPool_108)\n", - " %onnx::Relu_267 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.60, %donv3b.weight, %donv3b.bias)\n", - " %onnx::Conv_268 = Relu(%onnx::Relu_267)\n", - " %onnx::Relu_269 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%onnx::Conv_268, %donv3a.weight, %donv3a.bias)\n", - " %input.64 = Relu(%onnx::Relu_269)\n", - " %onnx::Gather_271 = Shape(%input.64)\n", - " %onnx::Gather_272 = Constant[value = ]()\n", - " %onnx::Gather_273 = Constant[value = ]()\n", - " %onnx::Range_274 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_272)\n", - " %onnx::Mul_275 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_273)\n", - " %onnx::Gather_276 = Constant[value = ]()\n", - " %onnx::Sub_277 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_276)\n", - " %onnx::Sub_278 = Constant[value = ]()\n", - " %onnx::Mul_279 = Sub(%onnx::Sub_277, %onnx::Sub_278)\n", - " %onnx::Mul_280 = Constant[value = ]()\n", - " %onnx::Add_281 = Mul(%onnx::Mul_279, %onnx::Mul_280)\n", - " %onnx::Add_282 = Constant[value = ]()\n", - " %onnx::Mul_283 = Add(%onnx::Add_281, %onnx::Add_282)\n", - " %onnx::Gather_284 = Constant[value = ]()\n", - " %onnx::Sub_285 = Gather[axis = 0](%onnx::Gather_271, %onnx::Gather_284)\n", - " %onnx::Sub_286 = Constant[value = ]()\n", - " %onnx::Mul_287 = Sub(%onnx::Sub_285, %onnx::Sub_286)\n", - " %onnx::Mul_288 = Constant[value = ]()\n", - " %onnx::Add_289 = Mul(%onnx::Mul_287, %onnx::Mul_288)\n", - " %onnx::Add_290 = Constant[value = ]()\n", - " %onnx::Mul_291 = 
Add(%onnx::Add_289, %onnx::Add_290)\n", - " %onnx::Mul_292 = Mul(%onnx::Mul_283, %onnx::Mul_291)\n", - " %onnx::Mul_293 = Mul(%onnx::Mul_292, %onnx::Mul_275)\n", - " %onnx::Reshape_294 = Range(%onnx::Gather_272, %onnx::Mul_275, %onnx::Gather_273)\n", - " %onnx::Reshape_295 = Constant[value = ]()\n", - " %onnx::Mul_296 = Reshape(%onnx::Reshape_294, %onnx::Reshape_295)\n", - " %onnx::Cast_297 = Mul(%onnx::Mul_296, %onnx::Mul_292)\n", - " %onnx::Add_298 = Cast[to = 7](%onnx::Cast_297)\n", - " %onnx::Reshape_299 = Range(%onnx::Gather_272, %onnx::Range_274, %onnx::Gather_273)\n", - " %onnx::Reshape_300 = Constant[value = ]()\n", - " %onnx::Mul_301 = Reshape(%onnx::Reshape_299, %onnx::Reshape_300)\n", - " %onnx::Cast_302 = Mul(%onnx::Mul_301, %onnx::Mul_293)\n", - " %onnx::Add_303 = Cast[to = 7](%onnx::Cast_302)\n", - " %onnx::Add_304 = Add(%indices.3, %onnx::Add_298)\n", - " %onnx::MaxUnpool_305 = Add(%onnx::Add_304, %onnx::Add_303)\n", - " %onnx::Concat_306 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%input.64, %onnx::MaxUnpool_305)\n", - " %input.68 = Concat[axis = 1](%onnx::Concat_306, %onnx::MaxPool_93)\n", - " %onnx::Relu_308 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.68, %donv2.weight, %donv2.bias)\n", - " %onnx::Shape_309 = Relu(%onnx::Relu_308)\n", - " %onnx::Gather_310 = Shape(%onnx::Shape_309)\n", - " %onnx::Gather_311 = Constant[value = ]()\n", - " %onnx::Gather_312 = Constant[value = ]()\n", - " %onnx::Range_313 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_311)\n", - " %onnx::Mul_314 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_312)\n", - " %onnx::Gather_315 = Constant[value = ]()\n", - " %onnx::Sub_316 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_315)\n", - " %onnx::Sub_317 = Constant[value = ]()\n", - " %onnx::Mul_318 = Sub(%onnx::Sub_316, %onnx::Sub_317)\n", - " %onnx::Mul_319 = Constant[value = ]()\n", - " %onnx::Add_320 = Mul(%onnx::Mul_318, %onnx::Mul_319)\n", - " %onnx::Add_321 = Constant[value = ]()\n", - " %onnx::Mul_322 = Add(%onnx::Add_320, %onnx::Add_321)\n", - " %onnx::Gather_323 = Constant[value = ]()\n", - " %onnx::Sub_324 = Gather[axis = 0](%onnx::Gather_310, %onnx::Gather_323)\n", - " %onnx::Sub_325 = Constant[value = ]()\n", - " %onnx::Mul_326 = Sub(%onnx::Sub_324, %onnx::Sub_325)\n", - " %onnx::Mul_327 = Constant[value = ]()\n", - " %onnx::Add_328 = Mul(%onnx::Mul_326, %onnx::Mul_327)\n", - " %onnx::Add_329 = Constant[value = ]()\n", - " %onnx::Mul_330 = Add(%onnx::Add_328, %onnx::Add_329)\n", - " %onnx::Mul_331 = Mul(%onnx::Mul_322, %onnx::Mul_330)\n", - " %onnx::Mul_332 = Mul(%onnx::Mul_331, %onnx::Mul_314)\n", - " %onnx::Reshape_333 = Range(%onnx::Gather_311, %onnx::Mul_314, %onnx::Gather_312)\n", - " %onnx::Reshape_334 = Constant[value = ]()\n", - " %onnx::Mul_335 = Reshape(%onnx::Reshape_333, %onnx::Reshape_334)\n", - " %onnx::Cast_336 = Mul(%onnx::Mul_335, %onnx::Mul_331)\n", - " %onnx::Add_337 = Cast[to = 7](%onnx::Cast_336)\n", - " %onnx::Reshape_338 = Range(%onnx::Gather_311, %onnx::Range_313, %onnx::Gather_312)\n", - " %onnx::Reshape_339 = Constant[value = ]()\n", - " %onnx::Mul_340 = Reshape(%onnx::Reshape_338, %onnx::Reshape_339)\n", - " %onnx::Cast_341 = Mul(%onnx::Mul_340, %onnx::Mul_332)\n", - " %onnx::Add_342 = Cast[to = 7](%onnx::Cast_341)\n", - " %onnx::Add_343 = Add(%indices, %onnx::Add_337)\n", - " %onnx::MaxUnpool_344 = Add(%onnx::Add_343, %onnx::Add_342)\n", - " %onnx::Concat_345 = MaxUnpool[kernel_shape = [2, 2], strides = [2, 2]](%onnx::Shape_309, 
%onnx::MaxUnpool_344)\n", - " %input.72 = Concat[axis = 1](%onnx::Concat_345, %onnx::MaxPool_81)\n", - " %onnx::Relu_347 = Conv[dilations = [1, 1], group = 1, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input.72, %donv1.weight, %donv1.bias)\n", - " %onnx::Conv_348 = Relu(%onnx::Relu_347)\n", - " %349 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%onnx::Conv_348, %output.weight, %output.bias)\n", - " return %349\n", - "}\n" - ] - } - ], - "source": [ - "import onnx\n", - "\n", - "# Load the ONNX model\n", - "model = onnx.load(\"AbsSummarizer.onnx\")\n", - "\n", - "# Check that the model is well formed\n", - "onnx.checker.check_model(model)\n", - "\n", - "# Print a human readable representation of the graph\n", - "print(onnx.helper.printable_graph(model.graph))" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'1.12.1'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "torch.__version__\n" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "ename": "AttributeError", - "evalue": "module 'openvino' has no attribute '__version__'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[20], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mimport\u001b[39;00m \u001b[39mopenvino\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m openvino\u001b[39m.\u001b[39;49m__version__\n", - "\u001b[0;31mAttributeError\u001b[0m: module 'openvino' has no attribute '__version__'" - ] - } - ], - "source": [ - "import openvino\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "openv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.15" - }, - "orig_nbformat": 4, - "vscode": { - "interpreter": { - "hash": "0d646c53ccd8c179444f222988100eafa4a93600d3e2b5bf836e9c9f08adbeba" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py index 88d63fb37f4..22fe88c2612 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py @@ -15,7 +15,7 @@ plt.switch_backend('agg') -def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epochs=35,lrate=1e-4,adv=False): +def train_network(config): """Training function for SUMNet,UNet,R2Unet Parameters @@ -43,21 +43,29 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc None """ + + fold_no = config["fold_no"] fold = 'fold'+str(fold_no) - save_path = save_path+'/'+network+'/'+fold+'/' + save_path = config["save_path"] + json_path = config["json_path"] + datapath = config["datapath"] + lung_segpath = config["lung_segpath"] + network = config["network"] + lrate = config["lrate"] + adv = 
config["adv"] + epochs = config["epochs"] + + save_path = os.path.join(save_path, network, fold) if not os.path.isdir(save_path): os.makedirs(save_path) - with open(json_path+fold+'_pos_neg_eq.json') as f: + with open(json_path) as f: json_file = json.load(f) - train_set = json_file['train_set'] - val_set = json_file['valid_set'] - trainDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="train_set",img_size=512) valDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) trainDataLoader = data.DataLoader(trainDset,batch_size=4,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) - validDataLoader = data.DataLoader(valDset,batch_size=4,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) + validDataLoader = data.DataLoader(valDset,batch_size=1,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) if network == 'unet': net = U_Net(img_ch=1,output_ch=2) @@ -89,7 +97,6 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc validDiceCoeff_lungs = [] start = time.time() - bestValidDice = torch.zeros(1) bestValidDice_lungs = 0.0 @@ -114,7 +121,6 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out,dim=1) if adv: @@ -219,3 +225,5 @@ def train_network(fold_no,save_path,json_path,datapath,lung_segpath,network,epoc save_path=save_path, x_label='Epochs', y_label='Dice coefficient', plot_title='Dice coefficient', save_name='Dice_Plot.png') + return trainLoss + diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py index f795a6bde97..18066837aee 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py @@ -7,12 +7,13 @@ import time import os from tqdm import tqdm as tq +import json from .data_loader import LungPatchDataLoader from .models import LeNet from .utils import plot_graphs -def lungpatch_classifier(save_path,img_path,lrate=1e-4,epochs=35): +def lungpatch_classifier(config): """Trains network to classify patches based on the presence of nodule Parameters @@ -31,9 +32,17 @@ def lungpatch_classifier(save_path,img_path,lrate=1e-4,epochs=35): None """ + save_path = config["savepath"] + img_path = config["imgpath"] + lrate = config["lrate"] + epochs = config["epochs"] + json_path = config["jsonpath"] - trainDset = LungPatchDataLoader(img_path=img_path,is_transform=True,split="train_set") - valDset = LungPatchDataLoader(img_path=img_path,is_transform=True,split="valid_set") + with open(json_path) as f: + json_file = json.load(f) + + trainDset = LungPatchDataLoader(imgpath=img_path,json_file=json_file,is_transform=True,split="train_set") + valDset = LungPatchDataLoader(imgpath=img_path,json_file=json_file,is_transform=True,split="valid_set") trainDataLoader = data.DataLoader(trainDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) validDataLoader = data.DataLoader(valDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) @@ -175,3 +184,5 @@ def lungpatch_classifier(save_path,img_path,lrate=1e-4,epochs=35): train_values=trainAcc, valid_values=validAcc, save_path=save_path, x_label='Epochs', y_label='Accuracy', plot_title='Accuracy Plot', save_name='acc_plot.png') + + return trainLoss diff --git 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index b05b137f4e3..ce25e69817b 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -2,6 +2,7 @@ import matplotlib.pyplot as plt import numpy as np import torch +import json from .models import LeNet, R2U_Net, SUMNet, U_Net @@ -37,7 +38,6 @@ def dice_coefficient(pred1, target): return [score_1.mean()] - def load_model(network): if network == 'unet': @@ -72,4 +72,21 @@ def plot_graphs( plt.savefig(os.path.join(save_path, save_name)) plt.close() - \ No newline at end of file +def create_dummy_json_file(json_path,stage): + test_data_path = os.path.split(json_path)[0] + if stage == 1: + img_path = os.path.join(test_data_path,'stage1','img') + else: + img_path = os.path.join(test_data_path,'stage2','img') + file_list = os.listdir(img_path) + train_list = file_list[:7] + valid_list = file_list[7:10] + test_list = file_list[10:15] + dummy_dict = { + "train_set":train_list, + "valid_set": valid_list, + "test_set": test_list + } + + with open(json_path, 'w') as h: + json.dump(dummy_dict, h) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py index e69de29bb2d..f8a7c447ff5 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py @@ -0,0 +1,103 @@ +import unittest +import os +from src.utils.downloader import download_checkpoint +from src.utils.exporter import Exporter +from src.utils.get_config import get_config + +def create_export_test_for_stage1(): + class ExportTestStage1(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='export', stage=1) + if not os.path.exists(cls.config['checkpoint']): + download_checkpoint(stage=1) + cls.model_path = cls.config['checkpoint'] + + def test_export_onnx(self): + self.exporter = Exporter(self.config, stage=1) + self.exporter.export_model_onnx() + self.assertTrue(os.path.join(os.path.split(self.model_path)[ + 0], self.config.get('model_name_onnx'))) + + def test_export_ir(self): + self.exporter = Exporter(self.config, stage=1) + model_dir = os.path.split(self.config['checkpoint'])[0] + if not os.path.exists(os.path.join(model_dir, self.config.get('model_name_onnx'))): + self.exporter.export_model_onnx() + self.exporter.export_model_ir() + name_xml = self.config['model_name'] + '.xml' + name_bin = self.config['model_name'] + '.bin' + xml_status = os.path.exists(os.path.join(model_dir, name_xml)) + bin_status = os.path.exists(os.path.join(model_dir, name_bin)) + self.assertTrue(xml_status) + self.assertTrue(bin_status) + + def test_config(self): + self.config = get_config(action='export', stage=1) + self.model_path = self.config['checkpoint'] + self.input_shape = self.config['input_shape'] + self.output_dir = os.path.split(self.model_path)[0] + self.assertTrue(self.output_dir) + self.assertTrue(self.model_path) + self.assertListEqual(self.input_shape, [1, 1, 512, 512]) + return ExportTestStage1 + + +def create_export_test_for_stage2(): + class ExportTestStage2(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='export', stage=2) + if not os.path.exists(cls.config['checkpoint']): + download_checkpoint(stage=2) + cls.model_path = cls.config['checkpoint'] + + def test_export_onnx(self): + 
self.config = get_config(action='export', stage=2) + if not os.path.exists(self.config['checkpoint']): + download_checkpoint(stage=2) + self.exporter = Exporter(self.config, stage=2) + self.exporter.export_model_onnx() + checkpoint = os.path.split(self.config['checkpoint'])[0] + self.assertTrue(os.path.join( + checkpoint, self.config.get('model_name_onnx'))) + + def test_export_ir(self): + self.config = get_config(action='export', stage=2) + if not os.path.exists(self.config['checkpoint']): + download_checkpoint(stage=2) + self.exporter = Exporter(self.config, stage=2) + self.model_path = os.path.split(self.config['checkpoint'])[0] + if not os.path.exists(os.path.join(self.model_path, self.config.get('model_name_onnx'))): + self.exporter.export_model_onnx() + self.exporter.export_model_ir() + name_xml = self.config.get('model_name') + '.xml' + name_bin = self.config.get('model_name') + '.bin' + xml_status = os.path.exists( + os.path.join(self.model_path, name_xml)) + bin_status = os.path.exists( + os.path.join(self.model_path, name_bin)) + self.assertTrue(xml_status) + self.assertTrue(bin_status) + + def test_config(self): + self.config = get_config(action='export', stage=2) + self.model_path = self.config['checkpoint'] + self.input_shape = self.config['input_shape'] + self.output_dir = os.path.split(self.model_path)[0] + self.assertTrue(self.output_dir) + self.assertTrue(self.model_path) + self.assertListEqual(self.input_shape, [1, 1, 64, 64]) + return ExportTestStage2 + + +class TestExportStage1(create_export_test_for_stage1()): + 'Test case for stage1' + + +class TestExportStage2(create_export_test_for_stage2()): + 'Test case for stage2' + + +if __name__ == '__main__': + unittest.main() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py index e69de29bb2d..c3ea9e37915 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py @@ -0,0 +1,65 @@ +import unittest +import os +from src.utils.train_stage1 import train_network +from src.utils.train_stage2 import lungpatch_classifier +from src.utils.models import SUMNet, LeNet +from src.utils.downloader import download_checkpoint, download_data +from src.utils.get_config import get_config +from src.utils.utils import create_dummy_json_file + +def create_train_test_for_stage1(): + class TrainerTestStage1(unittest.TestCase): + @classmethod + def setUpClass(cls): + config = get_config(action='train', stage=1) + cls.config = config + if not os.path.exists(config["datapath"]): + download_data() + if not os.path.exists(config["json_path"]): + create_dummy_json_file(config["json_path"], stage=1) + + def test_trainer(self): + self.model = SUMNet(in_ch=1,out_ch=2) + loss_list= train_network(self.config) + self.assertLessEqual(loss_list[2], loss_list[0]) + + def test_config(self): + self.config = get_config(action='train', stage=1) + self.assertGreaterEqual(self.config["lrate"], 1e-8) + + return TrainerTestStage1 + +def create_train_test_for_stage2(): + class TrainerTestStage2(unittest.TestCase): + @classmethod + def setUpClass(cls): + config = get_config(action='train', stage=2) + cls.config = config + if not os.path.exists(config["imgpath"]): + download_data() + if not os.path.exists(config["jsonpath"]): + create_dummy_json_file(config["jsonpath"], stage=2) + + + def test_trainer(self): + self.model = LeNet() + loss_list = lungpatch_classifier(self.config) + 
self.assertLessEqual(loss_list[2], loss_list[0]) + + def test_config(self): + self.config = get_config(action='train', stage=2) + self.assertGreaterEqual(self.config["lrate"], 1e-8) + + return TrainerTestStage2 + + +class TestTrainerStage1(create_train_test_for_stage1()): + 'Test case for stage1' + +class TestTrainerStage2(create_train_test_for_stage2()): + 'Test case for stage2' + + +if __name__ == '__main__': + + unittest.main() From 0b48ba94df02a60df8a9dd88fb552c68958bab43 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Fri, 13 Jan 2023 02:24:39 +0530 Subject: [PATCH 07/47] added unit tests for inference --- .../configs/stage1_config.json | 3 + .../configs/stage2_config.json | 2 +- .../src/utils/exporter.py | 3 +- .../src/utils/infer_stage1.py | 79 ++-- .../src/utils/infer_stage2.py | 62 +-- .../.github/FUNDING.yml | 13 + .../.github/workflows/main.yml | 208 ++++++++++ .../src/utils/openvino_pytorch_layers/LICENSE | 201 ++++++++++ .../utils/openvino_pytorch_layers/README.md | 41 ++ .../utils/openvino_pytorch_layers/compare.py | 39 ++ .../examples/calculate_grid/calculate_grid.py | 24 ++ .../examples/calculate_grid/export_model.py | 40 ++ .../examples/complex_mul/complex_mul.py | 22 ++ .../examples/complex_mul/export_model.py | 43 ++ .../deformable_conv/deformable_conv.py | 51 +++ .../examples/deformable_conv/export_model.py | 111 ++++++ .../examples/fft/export_model.py | 45 +++ .../examples/fft/fft.py | 73 ++++ .../examples/grid_sample/export_model.py | 50 +++ .../examples/grid_sample/grid_sample.py | 12 + .../examples/sparse_conv/export_model.py | 69 ++++ .../examples/sparse_conv/sparse_conv.py | 55 +++ .../examples/unpool/README.md | 47 +++ .../examples/unpool/export_model.py | 56 +++ .../examples/unpool/unpool.py | 17 + .../mo_extensions/front/onnx/max_unpool.py | 46 +++ .../mo_extensions/ops/MaxPoolGrad.py | 24 ++ .../openvino_extensions/__init__.py | 23 ++ .../utils/openvino_pytorch_layers/setup.py | 21 + .../tests/requirements.txt | 5 + .../tests/run_tests.py | 147 +++++++ .../user_ie_extensions/CMakeLists.txt | 27 ++ .../user_ie_extensions/calculate_grid.cpp | 79 ++++ .../user_ie_extensions/calculate_grid.hpp | 29 ++ .../user_ie_extensions/complex_mul.cpp | 89 +++++ .../user_ie_extensions/complex_mul.hpp | 30 ++ .../user_ie_extensions/fft.cpp | 372 ++++++++++++++++++ .../user_ie_extensions/fft.hpp | 36 ++ .../user_ie_extensions/grid_sample.cpp | 125 ++++++ .../user_ie_extensions/grid_sample.hpp | 30 ++ .../user_ie_extensions/ov_extension.cpp | 38 ++ .../user_ie_extensions/sparse_conv.cpp | 109 +++++ .../user_ie_extensions/sparse_conv.hpp | 33 ++ .../sparse_conv_transpose.cpp | 109 +++++ .../sparse_conv_transpose.hpp | 33 ++ .../user_ie_extensions/unpool.cpp | 86 ++++ .../user_ie_extensions/unpool.hpp | 41 ++ .../src/utils/train_stage1.py | 10 +- .../src/utils/train_stage2.py | 2 +- .../lung_nodule_detection/src/utils/utils.py | 24 +- .../tests/test_export.py | 12 +- .../tests/test_inference.py | 66 ++++ 52 files changed, 2934 insertions(+), 78 deletions(-) create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md create mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp create mode 100644 
misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp create mode 100644 misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json index 6983532195f..a8f638953a1 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage1_config.json @@ -12,7 +12,10 @@ "inference":{ "fold_no": 4, "save_path": "temp_data/stage1/", + "data_path": "downloads/test_data/stage1/", + "lung_segpath": "downloads/test_data/stage1/mask/", "json_path": "downloads/test_data/fold4_pos_neg_eq.json", + "onnx_checkpoint": "downloads/model_weights/stage1/lung_seg.onnx", "network": "sumnet" }, "export":{ diff --git a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json index 180f05b1247..50b8f0239f7 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json +++ b/misc/pytorch_toolkit/lung_nodule_detection/configs/stage2_config.json @@ -7,7 +7,7 @@ "network": "lenet" }, "inference":{ - "modelpath":"temp_data/stage2/", + "modelpath":"downloads/model_weights/stage2/", "imgpath":"downloads/test_data/stage2/", "jsonpath": "downloads/test_data/patch_data_split.json", "network": "lenet" diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py index 4e6d1a788fe..2cc0e0f7477 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/exporter.py @@ -17,7 +17,8 @@ def export_model_ir(self): os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) input_shape = self.config.get('input_shape') output_dir = os.path.split(self.checkpoint)[0] - openvino_extension_path = '/home/deeptensor/rakshith_codes/training_extensions/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions' + current_dir = os.path.abspath(os.path.dirname(__file__)) + openvino_extension_path = 
os.path.join(current_dir,'openvino_pytorch_layers','mo_extensions') export_command = f"""mo \ --framework onnx \ --input_model {input_model} \ diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py index 00cd7ba42bd..987cab7e846 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py @@ -1,7 +1,7 @@ import torch from torch.utils import data import torch.nn.functional as F -from torch.autograd import Variable +from torchvision import transforms import os import numpy as np from tqdm import tqdm as tq @@ -9,10 +9,14 @@ import json from .models import SUMNet, U_Net, R2U_Net from .data_loader import LungDataLoader -from .utils import dice_coefficient +from .utils import dice_coefficient, load_inference_model, load_checkpoint + plt.switch_backend('agg') +def to_numpy(tensor): + return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() + -def infer_lungseg(fold_no,save_path,network,jsonpath): +def infer_lungseg(config, run_type='pytorch'): """ Inference script for lung segmentation Parameters @@ -31,49 +35,64 @@ def infer_lungseg(fold_no,save_path,network,jsonpath): None """ - + fold_no = config["fold_no"] + save_path = config["save_path"] + network = config["network"] + print(network) + jsonpath = config["json_path"] + datapath = config["data_path"] + lung_segpath = config["lung_segpath"] fold = 'fold'+str(fold_no) - save_path = os.path.join(save_path,network,fold) if not os.path.isdir(save_path): os.makedirs(save_path) - with open(os.path.join(jsonpath,fold+'_pos_neg_eq.json')) as f: + with open(jsonpath) as f: json_file = json.load(f) - - testDset = LungDataLoader(is_transform=True,json_file=json_file,split="test_set",img_size=512) - testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) + testDset = LungDataLoader(datapath=datapath,lung_path = lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) + testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) testBatches = 0 testDice_lungs = 0 - - if network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) - elif network == 'unet': - net = U_Net(img_ch=1,output_ch=2) - else: - net = R2U_Net(img_ch=1,output_ch=2) - - dice_list = [] use_gpu = torch.cuda.is_available() - if use_gpu: - net = net.cuda() + if run_type == 'pytorch': + if network == 'sumnet': + net = SUMNet(in_ch=1,out_ch=2) + elif network == 'unet': + net = U_Net(img_ch=1,output_ch=2) + else: + net = R2U_Net(img_ch=1,output_ch=2) + if use_gpu: + net = net.cuda() + net = load_checkpoint(net,save_path+network+'_best_lungs.pt') - net.load_state_dict(torch.load(save_path+network+'_best_lungs.pt')) + elif run_type == 'onnx': + net = load_inference_model(config,run_type='onnx') + else: + net = load_inference_model(config,run_type='ir') for data1 in tq(testDataLoader): - - imgs, mask = data1 - labels = mask - if use_gpu: - inputs = imgs.cuda() - labels = labels.cuda() - - net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out.data,dim=1) + inputs, labels = data1 + to_tensor = transforms.ToTensor() + if run_type == 'pytorch': + if use_gpu: + inputs = inputs.cuda() + + net_out = net(inputs) + net_out_sf = F.softmax(net_out.data,dim=1).detach().cpu() + elif run_type == 'ir': + net_out = net.infer(inputs={'input': 
inputs})['output'] + net_out = torch.tensor(net_out) + net_out_sf = F.softmax(net_out.data,dim=0) + else: + ort_inputs = {net.get_inputs()[0].name: to_numpy(inputs)} + net_out = net.run(None, ort_inputs) + net_out = np.array(net_out) + net_out = to_tensor(net_out).squeeze(1).transpose(dim0=1, dim1=0) + net_out_sf = F.softmax(net_out.data,dim=1) test_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) pred_max = torch.argmax(net_out_sf, dim=1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py index 8c752e794b1..717d52476ef 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py @@ -6,22 +6,35 @@ from torch.autograd import Variable import numpy as np from tqdm import tqdm as tq +import json from sklearn.metrics import confusion_matrix from .data_loader import LungPatchDataLoader from .models import LeNet +from .utils import load_inference_model -def lungpatch_classifier(modelpath,imgpath): +def lungpatch_classifier(config, run_type): + imgpath = config["imgpath"] + modelpath = config["modelpath"] + jsonpath = config["jsonpath"] - testDset = LungPatchDataLoader(imgpath,is_transform=True,split="test") + with open(jsonpath) as f: + json_file = json.load(f) + + testDset = LungPatchDataLoader(imgpath,json_file,is_transform=True,split="test_set") testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True) classification_model_loadPath = modelpath - net = LeNet() use_gpu = torch.cuda.is_available() + if run_type == 'pytorch': + net = LeNet() + if use_gpu: + net = net.cuda() + net.load_state_dict(torch.load(classification_model_loadPath+'lenet_best.pt')) + elif run_type == 'onnx': + net = load_inference_model(config, run_type='onnx') + else: + net = load_inference_model(config, run_type='ir') - if use_gpu: - net = net.cuda() - net.load_state_dict(torch.load(classification_model_loadPath+'lenet_best.pt')) optimizer = optim.Adam(net.parameters(), lr = 1e-4, weight_decay = 1e-5) criterion = nn.BCEWithLogitsLoss() @@ -32,13 +45,22 @@ def lungpatch_classifier(modelpath,imgpath): pred_arr = [] label_arr = [] for data1 in tq(testDataLoader): - img, label = data1 - if use_gpu: - inputs = img.cuda() - label = label.float() - label = label.cuda() - - net_out = net(Variable(inputs)) + inputs, label = data1 + + if run_type == 'pytorch': + if use_gpu: + inputs = inputs.cuda() + label = label.float() + label = label.cuda() + net_out = net(Variable(inputs)) + elif run_type == 'ir': + net_out = net.infer(inputs={'input': inputs})['output'] + net_out = torch.tensor(net_out) + else: + ort_inputs = {net.get_inputs()[0].name: to_numpy(inputs)} + net_out = net.run(None, ort_inputs) + net_out = np.array(net_out) + net_out = torch.tensor(net_out) net_loss = criterion(net_out,label) preds = torch.zeros(net_out.shape).cuda() @@ -62,16 +84,4 @@ def lungpatch_classifier(modelpath,imgpath): print(' Loss: {:.4f} | accuracy: {:.4f} '.format( testepoch_loss,testepoch_acc)) - - tn, fp, fn, tp = confusion_matrix(np.array(label_arr).flatten(), np.array(pred_arr).flatten()).ravel() - - print('True Negative :',tn) - print('false Negative :',fn) - print('True positive :',tp) - print('False positive :',fp) - specificity = tn/(tn+fp) - sensitivity = tp/(tp+fn) - print('Specificity :',specificity) - print('Sensitivity :',sensitivity) - - return testepoch_acc, specificity, sensitivity + 
return testepoch_acc diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml new file mode 100644 index 00000000000..7a26b0e5570 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: https://www.buymeacoffee.com/dkurt diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml new file mode 100644 index 00000000000..a089fe88281 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/.github/workflows/main.yml @@ -0,0 +1,208 @@ +# This is a basic workflow to help you get started with Actions + +name: CI + +# Controls when the action will run. Triggers the workflow on push or pull request +# events but only for the master branch +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + OPENVINO_VERSION: 2022.1.0 + OPENCV_VERSION: 4.5.5 + VERSION: 2022.1.0.dev3 + DIST_WIN: https://registrationcenter-download.intel.com/akdlm/irc_nas/18618/w_openvino_toolkit_p_2022.1.0.643_offline.exe + DIST_MAC: https://registrationcenter-download.intel.com/akdlm/irc_nas/18616/m_openvino_toolkit_p_2022.1.0.643_offline.dmg + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + build_lnx: + runs-on: ubuntu-18.04 + container: + centos:centos8.4.2105 + + steps: + - uses: actions/checkout@v2 + + - name: Install dependencies + run: | + dnf -y --disablerepo '*' --enablerepo=extras swap centos-linux-repos centos-stream-repos + dnf -y distro-sync + yum group install -y "Development Tools" --nobest + yum install -y python3 wget cmake + python3 -m pip install --upgrade pip + + - name: Install OpenVINO + run: | + tee > /tmp/openvino-2022.repo << EOF + [OpenVINO] + name=Intel(R) Distribution of OpenVINO 2022 + baseurl=https://yum.repos.intel.com/openvino/2022 + enabled=1 + gpgcheck=1 + repo_gpgcheck=1 + gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + EOF + mv /tmp/openvino-2022.repo /etc/yum.repos.d + yum repolist | grep -i openvino + yum install -y yum-utils openvino-2022.1.0 + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + make -j$(nproc --all) install + + - name: Build CPU extensions + run: | + source /opt/intel/openvino_2022/setupvars.sh + cd user_ie_extensions + mkdir build && cd 
build + cmake .. -DCMAKE_BUILD_TYPE=Release + make -j$(nproc --all) + + - name: Build wheel + run: | + python3 -m pip install wheel + EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.so python3 setup.py build bdist_wheel + mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-manylinux2014_x86_64.whl + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_lnx" + path: "*.whl" + + build_win: + runs-on: windows-latest + + steps: + - uses: actions/checkout@v2 + + - name: Install OpenVINO + run: | + Invoke-WebRequest ${{env.DIST_WIN}} -OutFile openvino.exe + Start-Process -Wait -FilePath "openvino.exe" -ArgumentList "-s -a --silent --eula accept" + shell: pwsh + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ..\\opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + cmake --build . --config Release -j 2 + cmake --install . --prefix "C:\opencv_install" + shell: cmd + + - name: Build CPU extensions + run: | + call "C:\Program Files (x86)\Intel\openvino_2022\setupvars.bat" + cd user_ie_extensions + mkdir build && cd build + cmake .. -DOpenCV_DIR="C:\opencv_install" + cmake --build . --config Release -j 2 + shell: cmd + + - name: Build wheel + run: | + python3 -m pip install --upgrade pip + python3 -m pip install wheel + ls user_ie_extensions\build\Release + set EXT_LIB=user_ie_extensions\\build\\Release\\user_cpu_extension.dll + python3 setup.py build bdist_wheel + move dist\\*.whl openvino_extensions-${{env.VERSION}}-py3-none-win_amd64.whl + shell: cmd + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_win" + path: "*.whl" + + build_mac: + runs-on: macos-10.15 + + steps: + - uses: actions/checkout@v2 + + - name: Install OpenVINO + run: | + curl ${{env.DIST_MAC}} -o openvino.dmg + hdiutil attach openvino.dmg + cd /Volumes/m_openvino_toolkit_p_2022.1.0.643_offline/bootstrapper.app/Contents/MacOS/ + sudo ./install.sh -s --eula=accept + + - name: Build OpenCV + run: | + git clone https://github.com/opencv/opencv/ -b ${{env.OPENCV_VERSION}} --depth 1 + mkdir opencv_build && cd opencv_build + cmake ../opencv -DCMAKE_BUILD_TYPE=Release -DBUILD_LIST=core + make -j$(nproc --all) install + + - name: Build CPU extensions + run: | + source /opt/intel/openvino_2022/setupvars.sh + cd user_ie_extensions + mkdir build && cd build + cmake .. 
-DCMAKE_BUILD_TYPE=Release + make -j$(nproc --all) + + - name: Build wheel + run: | + python3 -m pip install --upgrade pip + python3 -m pip install wheel + ls user_ie_extensions/build/ + EXT_LIB=user_ie_extensions/build/libuser_cpu_extension.dylib python3 setup.py build bdist_wheel + mv dist/*.whl openvino_extensions-${{env.VERSION}}-py3-none-macosx_10_15_x86_64.whl + + - uses: actions/upload-artifact@v2 + with: + name: "wheel_mac" + path: "*.whl" + + test_lnx: + needs: build_lnx + runs-on: ubuntu-18.04 + + steps: + - uses: actions/checkout@v2 + + - uses: actions/download-artifact@v2 + with: + name: wheel_lnx + + - name: Install dependencies + run: | + sudo apt-get install -y python3-setuptools libopencv-dev + python3 -m pip install --upgrade pip + python3 -m pip install -r tests/requirements.txt + python3 -m pip install -U protobuf + python3 -m pip install openvino-dev[onnx]==${{env.OPENVINO_VERSION}} + + # Also, remove "openvino_extensions" folder to avoid import confusion + - name: Install CPU extensions + run: | + rm -r openvino_extensions + python3 -m pip install *.whl + + - name: Test + run: | + python3 -m pytest tests/run_tests.py + + publish: + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} + needs: [test_lnx, build_win, build_mac] + runs-on: ubuntu-18.04 + steps: + - uses: actions/download-artifact@v2 + + - name: Publish + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + run: | + python3 -m pip install --upgrade pip + python3 -m pip install twine + python3 -m twine upload wheel*/*.whl --skip-existing diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
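The appendix above already contains the full boilerplate; in practice it is pasted into each source file using that file's comment syntax, with the bracketed fields filled in. A minimal illustration for a Python module (the year and owner below are placeholders, not values taken from this project):

```python
# Copyright 2022 Example Contributor  (placeholder year and owner)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```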
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md new file mode 100644 index 00000000000..b7cc66c5f79 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/README.md @@ -0,0 +1,41 @@ +Repository with guides for enabling some PyTorch layers in Intel OpenVINO:
+
+[![CI](https://github.com/dkurt/openvino_pytorch_layers/workflows/CI/badge.svg?branch=master)](https://github.com/dkurt/openvino_pytorch_layers/actions?query=branch%3Amaster)
+
+* [nn.MaxUnpool2d](examples/unpool)
+* [torch.fft](examples/fft)
+* [nn.functional.grid_sample](https://github.com/dkurt/openvino_pytorch_layers/tree/master/examples/grid_sample)
+* [torchvision.ops.DeformConv2d](examples/deformable_conv)
+* [SparseConv](examples/sparse_conv) from [Open3D](https://github.com/isl-org/Open3D)
+
+
+## OpenVINO Model Optimizer extension
+
+To create OpenVINO IR, use the extra `--extension` flag to specify a path to the Model Optimizer extensions that perform graph transformations and register the custom layers.
+
+```bash
+mo --input_model model.onnx --extension openvino_pytorch_layers/mo_extensions
+```
+
+## Custom CPU extensions
+
+You also need to build the CPU extensions library, which contains the actual C++ layer implementations:
+```bash
+source /opt/intel/openvino_2022/setupvars.sh
+
+cd user_ie_extensions
+mkdir build && cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release && make -j$(nproc --all)
+```
+
+Add the compiled extensions library to your project:
+
+```python
+from openvino.runtime import Core
+
+core = Core()
+core.add_extension('user_ie_extensions/build/libuser_cpu_extension.so')
+
+model = core.read_model('model.xml')
+compiled_model = core.compile_model(model, 'CPU')
+``` diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py new file mode 100644 index 00000000000..13fe7c81232 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/compare.py @@ -0,0 +1,39 @@ +# NOTE: import order is critical for now: extensions, openvino and only then numpy
+from openvino_extensions import get_extensions_path
+from openvino.inference_engine import IECore
+
+import argparse
+import numpy as np
+
+parser = argparse.ArgumentParser(description='Compare OpenVINO implementation with reference data')
+parser.add_argument('--num_inputs', type=int, default=1)
+parser.add_argument('-m', '--model', default="model.xml")
+parser.add_argument('-d', '--device', default="CPU")
+args = parser.parse_args()
+
+inputs = {}
+shapes = {}
+for i in range(args.num_inputs):
+    suffix = '{}'.format(i if i > 0 else '')
+    data = np.load('inp' + suffix + '.npy')
+    inputs['input' + suffix] = data
+    shapes['input' + suffix] = data.shape
+
+ref = np.load('ref.npy')
+
+ie = IECore()
+ie.add_extension(get_extensions_path(), 'CPU')
+ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU')
+
+net = ie.read_network(args.model)
+net.reshape(shapes)
+exec_net = ie.load_network(net, args.device)
+
+out = exec_net.infer(inputs)
+out = next(iter(out.values()))
+
+maxdiff = np.max(np.abs(ref - out))
+print('Reference range: [{}, {}]'.format(np.min(ref), np.max(ref)))
+print('Maximal difference:', maxdiff)
+if maxdiff > 1e-5:
+    exit(1) diff --git 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py new file mode 100644 index 00000000000..a6c55adcf97 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/calculate_grid.py @@ -0,0 +1,24 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class CalculateGrid(torch.autograd.Function): + @staticmethod + def symbolic(g, in_positions): + return g.op("CalculateGrid", in_positions) + + @staticmethod + def forward(self, in_positions): + filter = torch.Tensor([[-1, -1, -1], [-1, -1, 0], [-1, 0, -1], [-1, 0, 0], + [0, -1, -1], [0, -1, 0], [0, 0, -1], + [0, 0, 0]]).to(in_positions.device) + + out_pos = in_positions.long().repeat(1, filter.shape[0]).reshape(-1, 3) + filter = filter.repeat(in_positions.shape[0], 1) + + out_pos = out_pos + filter + out_pos = out_pos[out_pos.min(1).values >= 0] + out_pos = out_pos[(~((out_pos.long() % 2).bool()).any(1))] + out_pos = torch.unique(out_pos, dim=0) + + return out_pos + 0.5 diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py new file mode 100644 index 00000000000..e7a3fd9d40e --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/calculate_grid/export_model.py @@ -0,0 +1,40 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .calculate_grid import CalculateGrid + + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.calculate_grid = CalculateGrid() + + def forward(self, x): + return self.calculate_grid.apply(x) + + +def export(num_points, max_grid_extent): + # Generate a list of unique positions and add a mantissa + np.random.seed(32) + torch.manual_seed(11) + + inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) + inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) + + model = MyModel() + with torch.no_grad(): + torch.onnx.export(model, (inp_pos), 'model.onnx', + input_names=['input'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp_pos).detach().numpy() + + # Pad values with espetial end line (-1, 0, 0) and zeros + ref = np.concatenate((ref, [[-1, 0, 0]])) + ref = np.pad(ref, ((0, inp_pos.shape[0] - ref.shape[0]), (0, 0))) + + np.save('inp', inp_pos.detach().numpy()) + np.save('ref', ref) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py new file mode 100644 index 00000000000..d0a854b6c50 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/complex_mul.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn + +class ComplexMul(torch.autograd.Function): + @staticmethod + def symbolic(g, input_tensor, other_tensor, is_conj = True): + return g.op("ComplexMultiplication", input_tensor, other_tensor, is_conj_i=int(is_conj)) + + @staticmethod + def forward(self, 
input_tensor, other_tensor): + complex_index = -1 + real_part = input_tensor[..., 0] * other_tensor[..., 0] - input_tensor[..., 1] * other_tensor[..., 1] + imaginary_part = input_tensor[..., 0] * other_tensor[..., 1] + input_tensor[..., 1] * other_tensor[..., 0] + + multiplication = torch.cat( + [ + real_part.unsqueeze(dim=complex_index), + imaginary_part.unsqueeze(dim=complex_index), + ], + dim=complex_index, + ) + return multiplication diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py new file mode 100644 index 00000000000..564d93342e6 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/complex_mul/export_model.py @@ -0,0 +1,43 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .complex_mul import ComplexMul + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.complex_mul = ComplexMul() + + def forward(self, x, y): + return self.complex_mul.apply(x, y) + +def export(inp_shape=[3, 2, 4, 8, 2], other_shape=[3, 2, 4, 8, 2]): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel() + inp = Variable(torch.randn(inp_shape)) + inp1 = Variable(torch.randn(other_shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, (inp, inp1), 'model.onnx', + input_names=['input', 'input1'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp, inp1) + np.save('inp', inp.detach().numpy()) + np.save('inp1', inp1.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--inp_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) + parser.add_argument('--other_shape', type=int, nargs='+', default=[3, 2, 4, 8, 2]) + args = parser.parse_args() + + export(args.inp_shape, args.other_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py new file mode 100644 index 00000000000..fce9fa679ea --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/deformable_conv.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn +import torchvision.ops as ops + + +class DeformableConvFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, x, offset): + weight = cls.state_dict()["weight"] + weight = g.op("Constant", value_t=weight) + + return g.op( + "DeformableConv2D", + x, + offset, + weight, + strides_i=(cls.stride, cls.stride), + pads_i=(cls.padding, cls.padding, cls.padding, cls.padding), + dilations_i=(cls.dilation, cls.dilation), + deformable_group_i=cls.groups, + ) + + @staticmethod + def forward(self, cls, x, offset): + y = cls.origin_forward(x, offset) + return y + + +class DeformableConvolution(ops.DeformConv2d): + """ + This is a support class which helps export network with SparseConv in ONNX format. 
+ """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + self.stride = kwargs.get("stride", 1) + self.padding = kwargs.get("padding", 0) + self.dilation = kwargs.get("dilation", 1) + self.groups = kwargs.get("groups", 1) + self.pad_l = nn.ConstantPad2d((1, 1, 1, 1), 0) + + def forward(self, x, offset): + """ + Using paddings is a workaround for 2021.4 release. + """ + x = self.pad_l(x) + offset = self.pad_l(offset) + y = DeformableConvFunc.apply(self, x, offset) + y = y[:, :, 1:-1, 1:-1] + return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py new file mode 100644 index 00000000000..a7630adedc0 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/deformable_conv/export_model.py @@ -0,0 +1,111 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .deformable_conv import DeformableConvolution + +np.random.seed(324) +torch.manual_seed(32) + + +class MyModel(nn.Module): + def __init__( + self, + inplanes, + outplanes, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + bias=False, + deformable_groups=1, + ): + super(MyModel, self).__init__() + self.def_conv = DeformableConvolution( + inplanes, + outplanes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + groups=deformable_groups, + ) + + def forward(self, x, offset): + y = self.def_conv(x, offset) + return y + + +def export( + inplanes, + outplanes, + kernel_size, + stride, + padding, + dilation, + deformable_groups, + inp_shape, + offset_shape, +): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel( + inplanes, + outplanes, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + deformable_groups=deformable_groups, + ) + model.eval() + + x = Variable(torch.randn(inp_shape)) + offset = Variable(torch.randn(offset_shape)) + ref = model(x, offset) + + np.save("inp", x.detach().numpy()) + np.save("inp1", offset.detach().numpy()) + np.save("ref", ref.detach().numpy()) + + with torch.no_grad(): + torch.onnx.export( + model, + (x, offset), + "model.onnx", + input_names=["input", "input1"], + output_names=["output"], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH, + opset_version=12, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate ONNX model and test data") + parser.add_argument("--inp_shape", type=int, nargs="+", default=[1, 15, 128, 240]) + parser.add_argument( + "--offset_shape", type=int, nargs="+", default=[1, 18, 128, 240] + ) + parser.add_argument("--inplanes", type=int, nargs="+", default=15) + parser.add_argument("--outplanes", type=int, nargs="+", default=15) + parser.add_argument("--kernel_size", type=int, nargs="+", default=3) + parser.add_argument("--stride", type=int, nargs="+", default=1) + parser.add_argument("--padding", type=int, nargs="+", default=1) + parser.add_argument("--dilation", type=int, nargs="+", default=1) + parser.add_argument("--deformable_groups", type=int, nargs="+", default=1) + args = parser.parse_args() + + export( + args.inplanes, + args.outplanes, + args.kernel_size, + args.stride, + args.padding, + args.dilation, + args.deformable_groups, + args.inp_shape, + 
args.offset_shape, + ) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py new file mode 100644 index 00000000000..252c6c61207 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/export_model.py @@ -0,0 +1,45 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .fft import FFT + + +class MyModel(nn.Module): + def __init__(self, inverse, centred, dims): + super(MyModel, self).__init__() + self.inverse = inverse + self.centred = centred + self.dims = dims + self.fft = FFT() + + def forward(self, x): + return self.fft.apply(x, self.inverse, self.centred, self.dims) + + +def export(shape, inverse, centered, dims): + np.random.seed(324) + torch.manual_seed(32) + + model = MyModel(inverse, centered, dims) + inp = Variable(torch.randn(shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, inp, 'model.onnx', + input_names=['input'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH) + + ref = model(inp) + np.save('inp', inp.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8, 2]) + args = parser.parse_args() + + export(args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py new file mode 100644 index 00000000000..ccc6c872bd5 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/fft/fft.py @@ -0,0 +1,73 @@ +import torch +from packaging import version +from typing import List, Tuple, Union + +def roll( + data: torch.Tensor, + shift: Union[int, Union[Tuple[int, ...], List[int]]], + dims: Union[int, Union[Tuple, List]], +) -> torch.Tensor: + """ + Similar to numpy roll but applies to pytorch tensors. + Parameters + ---------- + data : torch.Tensor + shift: tuple, int + dims : tuple, list or int + + Returns + ------- + torch.Tensor + """ + if isinstance(shift, (tuple, list)) and isinstance(dims, (tuple, list)): + if len(shift) != len(dims): + raise ValueError(f"Length of shifts and dimensions should be equal. 
Got {len(shift)} and {len(dims)}.") + for curr_shift, curr_dim in zip(shift, dims): + data = roll(data, curr_shift, curr_dim) + return data + dim_index = dims + shift = shift % data.size(dims) + + if shift == 0: + return data + left_part = data.narrow(dim_index, 0, data.size(dims) - shift) + right_part = data.narrow(dim_index, data.size(dims) - shift, shift) + return torch.cat([right_part, left_part], dim=dim_index) + +def fftshift(data: torch.Tensor, dims) -> torch.Tensor: + shift = [data.size(curr_dim) // 2 for curr_dim in dims] + return roll(data, shift, dims) + +def ifftshift(data: torch.Tensor, dims) -> torch.Tensor: + shift = [(data.size(curr_dim) + 1) // 2 for curr_dim in dims] + return roll(data, shift, dims) + +class FFT(torch.autograd.Function): + @staticmethod + def symbolic(g, x, inverse, centered, dims): + dims = torch.tensor(dims) + dims = g.op("Constant", value_t=dims) + + return g.op('FFT', x, dims, inverse_i=inverse, centered_i=centered) + + @staticmethod + def forward(self, x, inverse, centered, dims): + # https://pytorch.org/docs/stable/torch.html#torch.fft + if centered: + x = ifftshift(x, dims) + + if version.parse(torch.__version__) >= version.parse("1.8.0"): + func = torch.fft.ifftn if inverse else torch.fft.fftn + x = torch.view_as_complex(x) + y = func(x, dim=dims, norm="ortho") + y = torch.view_as_real(y) + else: + signal_ndim = max(dims) + assert dims == list(range(1, signal_ndim + 1)) + func = torch.ifft if inverse else torch.fft + y = func(input=x, signal_ndim=signal_ndim, normalized=True) + + if centered: + y = fftshift(y, dims) + + return y diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py new file mode 100644 index 00000000000..fe3098d72f7 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/export_model.py @@ -0,0 +1,50 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .grid_sample import GridSample + + +class MyModel(nn.Module): + def __init__(self): + super(MyModel, self).__init__() + self.grid_sample = GridSample() + + def forward(self, x, grid): + return self.grid_sample.apply(x, grid) + + +def export(inp_shape=[5, 3, 6, 9], grid_shape=[5, 6, 9, 2]): + np.random.seed(324) + torch.manual_seed(32) + + if inp_shape[2] != grid_shape[1]: + raise Exception('Input height (got {}) should be equal to grid height (got {})'.format(inp_shape[2], grid_shape[1])) + if inp_shape[3] != grid_shape[2]: + raise Exception('Input width (got {}) should be equal to grid width (got {})'.format(inp_shape[3], grid_shape[2])) + + model = MyModel() + inp = Variable(torch.randn(inp_shape)) + grid = torch.Tensor(np.random.uniform(low=-2, high=2, size=grid_shape)) + model.eval() + + with torch.no_grad(): + torch.onnx.export(model, (inp, grid), 'model.onnx', + input_names=['input', 'input1'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = model(inp, grid) + np.save('inp', inp.detach().numpy()) + np.save('inp1', grid.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--inp_shape', type=int, nargs='+', default=[5, 3, 6, 9]) + 
parser.add_argument('--grid_shape', type=int, nargs='+', default=[5, 6, 9, 2]) + args = parser.parse_args() + + export(args.inp_shape, args.grid_shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py new file mode 100644 index 00000000000..f69fb7177ae --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/grid_sample/grid_sample.py @@ -0,0 +1,12 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class GridSample(torch.autograd.Function): + @staticmethod + def symbolic(g, x, grid): + return g.op('GridSample', x, grid) + + @staticmethod + def forward(self, x, grid): + return F.grid_sample(x, grid, 'bilinear', 'zeros', True) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py new file mode 100644 index 00000000000..9f2467b720a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/export_model.py @@ -0,0 +1,69 @@ +import numpy as np +import argparse +import torch +import torch.nn as nn +from torch.autograd import Variable +from .sparse_conv import SparseConvONNX, SparseConvTransposeONNX + + +def export(num_inp_points, num_out_points, max_grid_extent, in_channels, + filters, kernel_size, normalize, transpose): + np.random.seed(324) + torch.manual_seed(32) + + if transpose: + sparse_conv = SparseConvTransposeONNX(in_channels=in_channels, + filters=filters, + kernel_size=kernel_size, + use_bias=False, + normalize=False) + else: + sparse_conv = SparseConvONNX(in_channels=in_channels, + filters=filters, + kernel_size=kernel_size, + use_bias=False, + normalize=False) + + # Generate a list of unique positions and add a mantissa + def gen_pos(num_points): + inp_pos = np.random.randint(0, max_grid_extent, [num_points, 3]) + inp_pos = np.unique(inp_pos, axis=0).astype(np.float32) + inp_pos = torch.tensor(inp_pos) + torch.rand(inp_pos.shape, dtype=torch.float32) # [0, 1) + return inp_pos + + inp_pos = gen_pos(num_inp_points) + out_pos = gen_pos(num_out_points) if num_out_points else inp_pos + + features = torch.randn([inp_pos.shape[0], in_channels]) + + voxel_size = torch.tensor(1.0) + sparse_conv.eval() + + new_kernel = torch.randn(sparse_conv.state_dict()["kernel"].shape) + sparse_conv.load_state_dict({"kernel": new_kernel, + "offset": sparse_conv.state_dict()["offset"]}) + + with torch.no_grad(): + torch.onnx.export(sparse_conv, (features, inp_pos, out_pos, voxel_size), 'model.onnx', + input_names=['input', 'input1', 'input2', 'voxel_size'], + output_names=['output'], + operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) + + ref = sparse_conv(features, inp_pos, out_pos, voxel_size) + np.save('inp', features.detach().numpy()) + np.save('inp1', inp_pos.detach().numpy()) + np.save('inp2', out_pos.detach().numpy()) + np.save('ref', ref.detach().numpy()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Generate ONNX model and test data') + parser.add_argument('--num_points', type=int) + parser.add_argument('--max_grid_extent', type=int) + parser.add_argument('--in_channels', type=int) + parser.add_argument('--filters', type=int) + 
parser.add_argument('--kernel_size', type=int) + args = parser.parse_args() + + export(args.num_points, args.max_grid_extent, + args.in_channels, args.filters, args.kernel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py new file mode 100644 index 00000000000..54f4dbb309f --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/sparse_conv/sparse_conv.py @@ -0,0 +1,55 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from open3d.ml.torch.layers import SparseConv, SparseConvTranspose + +class SparseConvFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): + kernel = cls.state_dict()["kernel"] + offset = cls.state_dict()["offset"] + kernel = g.op("Constant", value_t=kernel) + offset = g.op("Constant", value_t=offset) + return g.op("SparseConv", feat, in_pos, out_pos, kernel, offset) + + @staticmethod + def forward(self, cls, feat, in_pos, out_pos, voxel_size): + return cls.origin_forward(feat, in_pos, out_pos, voxel_size) + + +class SparseConvONNX(SparseConv): + """ + This is a support class which helps export network with SparseConv in ONNX format. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + + def forward(self, feat, in_pos, out_pos, voxel_size): + return SparseConvFunc.apply(self, feat, in_pos, out_pos, voxel_size) + + +class SparseConvTransposeFunc(torch.autograd.Function): + @staticmethod + def symbolic(g, cls, feat, in_pos, out_pos, voxel_size): + kernel = cls.state_dict()["kernel"] + offset = cls.state_dict()["offset"] + kernel = g.op("Constant", value_t=kernel) + offset = g.op("Constant", value_t=offset) + return g.op("SparseConvTranspose", feat, in_pos, out_pos, kernel, offset) + + @staticmethod + def forward(self, cls, feat, in_pos, out_pos, voxel_size): + return cls.origin_forward(feat, in_pos, out_pos, voxel_size) + + +class SparseConvTransposeONNX(SparseConvTranspose): + """ + This is a support class which helps export network with SparseConvTranspose in ONNX format. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.origin_forward = super().forward + + def forward(self, feat, in_pos, out_pos, voxel_size): + return SparseConvTransposeFunc.apply(self, feat, in_pos, out_pos, voxel_size) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md new file mode 100644 index 00000000000..c46e767975d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/README.md @@ -0,0 +1,47 @@ +Guide of how to enable PyTorch `nn.MaxUnpool2d` in Intel OpenVINO. 
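For orientation: SegNet-style encoder-decoder networks, such as the SUMNet model this toolkit uses for lung segmentation, upsample feature maps with the indices recorded during max-pooling, which is why `nn.MaxUnpool2d` support matters here. A minimal, self-contained sketch of that pool/unpool pattern (channel counts and tensor shapes are arbitrary, chosen only for illustration):

```python
import torch
import torch.nn as nn

class PoolUnpoolBlock(nn.Module):
    """Toy SegNet-style block: downsample, transform, then restore resolution
    by re-using the max-pooling indices (this is what nn.MaxUnpool2d does)."""
    def __init__(self, channels=4):
        super().__init__()
        self.pool = nn.MaxPool2d(2, stride=2, return_indices=True)
        self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.unpool = nn.MaxUnpool2d(2, stride=2)

    def forward(self, x):
        pooled, indices = self.pool(x)          # indices remember the argmax positions
        features = self.conv(pooled)            # any processing at the lower resolution
        return self.unpool(features, indices)   # sparse upsampling back to the input size

x = torch.randn(1, 4, 8, 8)
y = PoolUnpoolBlock()(x)
print(y.shape)  # torch.Size([1, 4, 8, 8])
```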
+
+
+## Description
+At the time of writing this guide, there are two problems with OpenVINO and MaxUnpool:
+
+* OpenVINO does not have Unpooling kernels
+* PyTorch -> ONNX conversion is unimplemented for `nn.MaxUnpool2d`
+
+By following this guide you will learn:
+* How to perform PyTorch -> ONNX conversion for unsupported layers
+* How to convert ONNX to OpenVINO Intermediate Representation (IR) with extensions
+* How to write custom CPU layers in OpenVINO
+
+## Get ONNX model
+
+The MaxUnpool layer in PyTorch takes two inputs: the `features` from any layer and the `indices` returned by a MaxPool layer:
+
+```python
+self.pool = nn.MaxPool2d(2, stride=2, return_indices=True)
+self.unpool = nn.MaxUnpool2d(2, stride=2)
+
+output, indices = self.pool(x)
+# ...
+unpooled = self.unpool(features, indices)
+```
+
+If your version of PyTorch does not support ONNX model conversion with MaxUnpool, replace every unpool layer definition
+```python
+self.unpool = nn.MaxUnpool2d(2, stride=2)
+```
+with
+```python
+self.unpool = Unpool2d()
+```
+
+where `Unpool2d` is defined in [unpool.py](./unpool.py). Also, change the op usage from
+
+```python
+self.unpool(features, indices)
+```
+to
+```python
+self.unpool.apply(features, indices)
+```
+
+See the complete example in [export_model.py](./export_model.py). diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py new file mode 100644 index 00000000000..e229e47adc3 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/export_model.py @@ -0,0 +1,56 @@ +import numpy as np
+import argparse
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+from .unpool import Unpool2d
+
+np.random.seed(324)
+torch.manual_seed(32)
+
+class MyModel(nn.Module):
+    def __init__(self, mode):
+        super(MyModel, self).__init__()
+        self.mode = mode
+        self.pool = nn.MaxPool2d(2, stride=2, return_indices=True)
+        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, stride=1)
+        self.conv2 = nn.Conv2d(4, 4, kernel_size=1, stride=1)
+        self.unpool = Unpool2d()
+
+    def forward(self, x):
+        y = self.conv1(x)
+        output, indices = self.pool(y)
+        conv = self.conv2(output)
+        if self.mode == 'default':
+            return self.unpool.apply(conv, indices)
+        elif self.mode == 'dynamic_size':
+            return self.unpool.apply(conv, indices, x)
+        else:
+            raise Exception('Unknown mode: ' + self.mode)
+
+
+def export(mode, shape=[5, 3, 6, 8]):
+    np.random.seed(324)
+    torch.manual_seed(32)
+
+    model = MyModel(mode)
+    inp = Variable(torch.randn(shape))
+    model.eval()
+
+    with torch.no_grad():
+        torch.onnx.export(model, inp, 'model.onnx',
+                          input_names=['input'],
+                          output_names=['output'],
+                          operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
+
+    ref = model(inp)
+    np.save('inp', inp.detach().numpy())
+    np.save('ref', ref.detach().numpy())
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Generate ONNX model and test data')
+    parser.add_argument('--mode', choices=['default', 'dynamic_size'], help='Specify Unpooling behavior')
+    parser.add_argument('--shape', type=int, nargs='+', default=[5, 3, 6, 8])
+    args = parser.parse_args()
+    export(args.mode, args.shape) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py new file mode 100644 index 00000000000..52f961fe2cc --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/examples/unpool/unpool.py @@ -0,0 +1,17 @@ +import torch +import torch.nn as nn + +class Unpool2d(torch.autograd.Function): + @staticmethod + def symbolic(g, x, indices, output_size=None): + if output_size: + return g.op('Unpooling', x, indices, output_size) + else: + return g.op('Unpooling', x, indices) + + @staticmethod + def forward(self, x, indices, output_size=None): + if not output_size is None: + return nn.MaxUnpool2d(2, stride=2)(x, indices, output_size=output_size.size()) + else: + return nn.MaxUnpool2d(2, stride=2)(x, indices) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py new file mode 100644 index 00000000000..8e52a177bfd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/front/onnx/max_unpool.py @@ -0,0 +1,46 @@ +# mo_extensions/front/onnx/max_unpool.py +import numpy as np + +from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph +from openvino.tools.mo.graph.graph import Graph +from mo_extensions.ops.MaxPoolGrad import MaxPoolGrad +from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr + +class MaxUnpool(FrontReplacementSubgraph): + enabled = True + + def pattern(self): + return dict( + nodes=[ + ('max_pool0', dict(op='MaxPool')), + ('max_pool1', dict(op='MaxPool')), + ('slice', dict(op='AttributedSlice')), + ('sub', dict(op='Sub')), + ('unpool', dict(op='Unpooling')), + ], + edges=[ + ('max_pool1', 'slice'), + ('max_pool0', 'sub', {'in': 0}), + ('slice', 'sub', {'in': 1}), + ('sub', 'unpool', {'in': 1}), + ]) + + @staticmethod + def replace_sub_graph(graph: Graph, match: dict): + max_pool = match['max_pool0'] + max_pool_input = max_pool.in_port(0).get_source().node + unpool = match['unpool'] + unpool_input = unpool.in_port(0).get_source().node + + max_pool.out_port(1).disconnect() + + # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] + inputs = [max_pool_input, max_pool, unpool_input] + + res = MaxPoolGrad(graph, dict(name=unpool.name + '/fused')).create_node(inputs) + unpool.out_port(0).get_connection().set_source(res.out_port(0)) + + if len(unpool.in_ports()) == 3: + unpool.in_port(2).get_source().connect(res.in_port(3)) + else: + max_pool_input.out_port(0).connect(res.in_port(3)) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py new file mode 100644 index 00000000000..4011e697695 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/mo_extensions/ops/MaxPoolGrad.py @@ -0,0 +1,24 @@ +# mo_extensions/ops/MaxPoolGrad.py +import numpy as np +from openvino.tools.mo.graph.graph import Node, Graph +from openvino.tools.mo.ops.op import Op + +def shape_infer(node): + # Inputs: [max_pool_input, max_pool_output, unpool_input, shape] + assert(len(node.in_nodes()) == 4) + node.out_node(0).shape = node.in_node(0).shape + node.out_node(0).shape[2] = node.in_node(3).shape[2] + node.out_node(0).shape[3] = 
node.in_node(3).shape[3] + +class MaxPoolGrad(Op): + op = 'MaxPoolGrad' + enabled = True + + def __init__(self, graph: Graph, attrs: dict): + super().__init__(graph, { + 'type': __class__.op, + 'op': __class__.op, + 'in_ports_count': 4, + 'out_ports_count': 1, + 'infer': shape_infer + }, attrs) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py new file mode 100644 index 00000000000..ccef5c08085 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/openvino_extensions/__init__.py @@ -0,0 +1,23 @@ +import os +import sys + +def get_extensions_path(): + lib_name = 'user_cpu_extension' + if sys.platform == 'win32': + lib_name += '.dll' + elif sys.platform == 'linux': + lib_name = 'lib' + lib_name + '.so' + else: + lib_name = 'lib' + lib_name + '.dylib' + return os.path.join(os.path.dirname(__file__), lib_name) + + +# This is a dummy procedure which instantiates onnx_importer library preloading +try: + import io + from openvino.inference_engine import IECore + ie = IECore() + buf = io.BytesIO() + ie.read_network(buf.getvalue(), b"", init_from_buffer=True) +except Exception: + pass diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py new file mode 100644 index 00000000000..aa3a9d6fb11 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/setup.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +import os +from setuptools import setup + +if not 'VERSION' in os.environ: + raise Exception('Specify package version by environment variable') + +if not 'EXT_LIB' in os.environ: + raise Exception('Specify environment variable with a path to extensions library') + +setup(name='openvino-extensions', + version=os.environ['VERSION'], + author='Dmitry Kurtaev', + url='https://github.com/dkurt/openvino_pytorch_layers', + packages=['openvino_extensions'], + data_files=[('../../openvino_extensions', [os.environ['EXT_LIB']])], + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + ], +) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt new file mode 100644 index 00000000000..0ccfb5af363 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/requirements.txt @@ -0,0 +1,5 @@ +torch==1.8.1 +torchvision==0.9.1 +open3d==0.14.1 +tensorboard +pytest diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py new file mode 100644 index 00000000000..4cda23cbf2e --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/tests/run_tests.py @@ -0,0 +1,147 @@ +# NOTE: import order is critical for now: extensions, openvino and only then numpy +from openvino_extensions import get_extensions_path +from openvino.runtime import Core + +import subprocess +import pytest +from pathlib import Path + +import numpy as np + +def convert_model(): + subprocess.run(['mo', + '--input_model=model.onnx', + # 
'--extension', "user_ie_extensions/build/libuser_cpu_extension.so"], + '--extension', get_extensions_path()], + check=True) + +def run_test(convert_ir=True, test_onnx=False, num_inputs=1, threshold=1e-5): + if convert_ir and not test_onnx: + convert_model() + + inputs = {} + shapes = {} + for i in range(num_inputs): + suffix = '{}'.format(i if i > 0 else '') + data = np.load('inp' + suffix + '.npy') + inputs['input' + suffix] = data + shapes['input' + suffix] = data.shape + + ref = np.load('ref.npy') + + ie = Core() + # ie.add_extension("user_ie_extensions/build/libuser_cpu_extension.so") + ie.add_extension(get_extensions_path()) + # ie.set_config({'CONFIG_FILE': 'user_ie_extensions/gpu_extensions.xml'}, 'GPU') + + net = ie.read_model('model.onnx' if test_onnx else 'model.xml') + net.reshape(shapes) + exec_net = ie.compile_model(net, 'CPU') + + out = exec_net.infer_new_request(inputs) + out = next(iter(out.values())) + + assert ref.shape == out.shape + diff = np.max(np.abs(ref - out)) + assert diff <= threshold + + +# def test_unpool(): +# from examples.unpool.export_model import export +# export(mode='default') +# run_test() + + +# def test_unpool_reshape(): +# from examples.unpool.export_model import export +# export(mode='dynamic_size', shape=[5, 3, 6, 9]) +# run_test() + +# export(mode='dynamic_size', shape=[4, 3, 17, 8]) +# run_test(convert_ir=False) + +@pytest.mark.parametrize("shape", [[5, 120, 2], [4, 240, 320, 2], [3, 16, 240, 320, 2], [4, 5, 16, 31, 2]]) +@pytest.mark.parametrize("inverse", [False, True]) +@pytest.mark.parametrize("centered", [False, True]) +@pytest.mark.parametrize("test_onnx", [False, True]) +@pytest.mark.parametrize("dims", [[1], [1, 2], [2, 3]]) +def test_fft(shape, inverse, centered, test_onnx, dims): + from examples.fft.export_model import export + + if len(shape) == 3 and dims != [1] or \ + len(shape) == 4 and dims == [2, 3] or \ + len(shape) == 5 and dims == [1] or \ + centered and len(dims) != 2: + pytest.skip("unsupported configuration") + + export(shape, inverse, centered, dims) + run_test(test_onnx=test_onnx) + + +@pytest.mark.parametrize("test_onnx", [False, True]) +def test_grid_sample(test_onnx): + from examples.grid_sample.export_model import export + + export() + run_test(num_inputs=2, test_onnx=test_onnx) + + +@pytest.mark.parametrize("shape", [[3, 2, 4, 8, 2], [3, 1, 4, 8, 2]]) +@pytest.mark.parametrize("test_onnx", [False, True]) +def test_complex_mul(shape, test_onnx): + from examples.complex_mul.export_model import export + + export(other_shape=shape) + run_test(num_inputs=2, test_onnx=test_onnx) + + +@pytest.mark.parametrize("in_channels", [1, 3]) +@pytest.mark.parametrize("filters", [1, 4]) +@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5], [2, 2, 2]]) +@pytest.mark.parametrize("normalize", [False, True]) +@pytest.mark.parametrize("out_pos", [None, 16]) +def test_sparse_conv(in_channels, filters, kernel_size, normalize, out_pos): + from examples.sparse_conv.export_model import export + + export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, + filters=filters, kernel_size=kernel_size, normalize=normalize, + transpose=False) + run_test(num_inputs=3, test_onnx=True, threshold=1e-4) + + +@pytest.mark.parametrize("in_channels", [1, 3]) +@pytest.mark.parametrize("filters", [1, 4]) +@pytest.mark.parametrize("kernel_size", [[3, 3, 3], [5, 5, 5]]) +@pytest.mark.parametrize("normalize", [False]) +@pytest.mark.parametrize("out_pos", [None, 16]) +def test_sparse_conv_transpose(in_channels, 
filters, kernel_size, normalize, out_pos): + from examples.sparse_conv.export_model import export + + export(num_inp_points=1000, num_out_points=out_pos, max_grid_extent=4, in_channels=in_channels, + filters=filters, kernel_size=kernel_size, normalize=normalize, + transpose=True) + run_test(num_inputs=3, test_onnx=True, threshold=1e-4) + + +def test_calculate_grid(): + from examples.calculate_grid.export_model import export + export(num_points=10, max_grid_extent=5) + run_test(test_onnx=True) + + +def test_deformable_conv(): + from examples.deformable_conv.export_model import export + + export( + inplanes=15, + outplanes=15, + kernel_size=3, + stride=1, + padding=1, + dilation=1, + deformable_groups=1, + inp_shape=[1, 15, 128, 240], + offset_shape=[1, 18, 128, 240], + ) + run_test(num_inputs=2, threshold=2e-5) + run_test(num_inputs=2, test_onnx=True, threshold=2e-5) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt new file mode 100644 index 00000000000..f5b0576a26d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/CMakeLists.txt @@ -0,0 +1,27 @@ +# Copyright (C) 2020 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +# [cmake:extension] +set(CMAKE_CXX_STANDARD 11) + +set(TARGET_NAME "user_cpu_extension") + +find_package(OpenVINO REQUIRED) +find_package(OpenCV REQUIRED COMPONENTS core) +# find_package(TBB REQUIRED tbb tbbmalloc) + +file(GLOB_RECURSE SRC *.cpp) + +add_library(${TARGET_NAME} SHARED ${SRC}) + +target_compile_definitions(${TARGET_NAME} PRIVATE IMPLEMENT_OPENVINO_EXTENSION_API) + +if (OpenCV_FOUND) + target_include_directories(${TARGET_NAME} PRIVATE ${OpenCV_INCLUDE_DIRS}) +endif() + +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime + # ${TBB_IMPORTED_TARGETS} + ) +# [cmake:extension] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp new file mode 100644 index 00000000000..5545c737013 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.cpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "calculate_grid.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +CalculateGrid::CalculateGrid(const ov::Output& inp_pos) : Op({inp_pos}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void CalculateGrid::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr CalculateGrid::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 1, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool CalculateGrid::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool CalculateGrid::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* inpPos = reinterpret_cast(inputs[0].data()); + float* out = reinterpret_cast(outputs[0].data()); + + std::set > outPos; + + const size_t numPoints = inputs[0].get_shape()[0]; + static const std::vector > filters {{-1, -1, -1}, {-1, -1, 0}, {-1, 0, -1}, + {-1, 0, 0}, {0, -1, -1}, {0, -1, 0}, + {0, 0, -1}, {0, 0, 0}}; + + std::vector pos(3); + for (size_t i = 0; i < numPoints; ++i) { + for (size_t j = 0; j < filters.size(); ++j) { + bool isValid = true; + for (size_t k = 0; k < 3; ++k) { + int val = static_cast(inpPos[i * 3 + k]) + filters[j][k]; + if (val < 0 || val % 2) { + isValid = false; + break; + } + pos[k] = val; + } + if (isValid) + outPos.insert(std::make_tuple(pos[0], pos[1], pos[2])); + } + } + + int i = 0; + for (const auto it : outPos) { + out[i * 3] = 0.5f + std::get<0>(it); + out[i * 3 + 1] = 0.5f + std::get<1>(it); + out[i * 3 + 2] = 0.5f + std::get<2>(it); + i += 1; + } + memset(out + i * 3, 0, sizeof(float) * 3 * (numPoints - i)); + out[i * 3] = -1.0f; + return true; +} + +bool CalculateGrid::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp new file mode 100644 index 00000000000..b436f2d39d0 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/calculate_grid.hpp @@ -0,0 +1,29 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! [op:header] +namespace TemplateExtension { + +class CalculateGrid : public ov::op::Op { +public: + OPENVINO_OP("CalculateGrid"); + + CalculateGrid() = default; + CalculateGrid(const ov::Output& inp_pos); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp new file mode 100644 index 00000000000..86f4af2e464 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.cpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "complex_mul.hpp" +// #include +#include + +using namespace TemplateExtension; + +//! [op:ctor] +ComplexMultiplication::ComplexMultiplication( + const ov::Output& inp0, + const ov::Output& inp1) : Op({inp0, inp1}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void ComplexMultiplication::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); + set_output_type(0, get_input_element_type(1), outShape); +} +//! [op:validate] + +//! 
[op:copy] +std::shared_ptr ComplexMultiplication::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool ComplexMultiplication::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! [op:evaluate] +bool ComplexMultiplication::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* inp0 = reinterpret_cast(inputs[0].data()); + const float* inp1 = reinterpret_cast(inputs[1].data()); + float* out = reinterpret_cast(outputs[0].data()); + + size_t channels0 = inputs[0].get_shape()[1]; + size_t channels1 = inputs[1].get_shape()[1]; + size_t batch = inputs[0].get_shape()[0]; + size_t spatialSize = inputs[0].get_shape()[2] * inputs[0].get_shape()[3]; + + // x1 = x_r * y_r - x_i * y_i + // x2 = x_r * y_i + x_i * y_r + if (channels0 == channels1) + // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { + for (size_t ch = 0; ch < channels0 * batch; ++ch) { + for (int i = 0; i < spatialSize; ++i) { + int outIdx = (ch * spatialSize + i) * 2; + float real0 = inp0[outIdx]; + float imag0 = inp0[outIdx + 1]; + float real1 = inp1[outIdx]; + float imag1 = inp1[outIdx + 1]; + out[outIdx] = real0 * real1 - imag0 * imag1; + out[outIdx + 1] = real0 * imag1 + imag0 * real1; + } + } + else if (channels1 == 1) + // InferenceEngine::parallel_for(channels0 * batch, [&](size_t ch) { + for (size_t ch = 0; ch < channels0 * batch; ++ch) { + int b = ch / channels0; + for (int i = 0; i < spatialSize; ++i) { + int outIdx = (ch * spatialSize + i) * 2; + int inpIdx = (b * spatialSize + i) * 2; + float real0 = inp0[outIdx]; + float imag0 = inp0[outIdx + 1]; + float real1 = inp1[inpIdx]; + float imag1 = inp1[inpIdx + 1]; + out[outIdx] = real0 * real1 - imag0 * imag1; + out[outIdx + 1] = real0 * imag1 + imag0 * real1; + } + } + else + IE_THROW() << "Wrong number of channels for second input!"; + + return true; +} + +bool ComplexMultiplication::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp new file mode 100644 index 00000000000..9dd487f3a0a --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/complex_mul.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! [op:header] +namespace TemplateExtension { + +class ComplexMultiplication : public ov::op::Op { +public: + OPENVINO_OP("ComplexMultiplication"); + + ComplexMultiplication() = default; + ComplexMultiplication(const ov::Output& inp0, + const ov::Output& inp1); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! 
[op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp new file mode 100644 index 00000000000..86c8185097f --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.cpp @@ -0,0 +1,372 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "fft.hpp" + +#include +#include
+#include + +using namespace TemplateExtension; + +std::unique_ptr so; +using cvCreateMatHeaderF = CvMat*(int, int, int); +using cvSetDataF = void(CvArr*, void*, int); +using cvReleaseMatF = void(CvMat**); +using cvDftF = void(const CvArr*, CvArr*, int, int); +using cvScaleF = void(const CvArr*, CvArr*, double, double); +using cvCloneMatF = CvMat*(const CvMat*); +using cvCopyF = void(const CvArr*, const CvArr*, const CvArr*); +using cvInitMatHeaderF = CvMat*(CvMat*, int, int, int, void*, int); +using cvGetRawDataF = void(const CvArr*, uchar**, int* step, CvSize* roi_size); +using cvReshapeF = CvMat*(const CvArr*, CvMat*, int, int); +using cvCreateDataF = void(CvArr*); +using cvReleaseDataF = void(CvArr*); + +bool loadOpenCV() { + static bool loaded = false; + if (!loaded) { + loaded = true; + try { +#ifdef _WIN32 + so.reset(new InferenceEngine::details::SharedObjectLoader("opencv_core.dll")); +#elif defined(__APPLE__) + so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.dylib")); +#else + so.reset(new InferenceEngine::details::SharedObjectLoader("libopencv_core.so")); +#endif + } catch (InferenceEngine::details::InferenceEngineException& ex) { + return false; + } + } + return loaded; +} + +void fftshift(CvMat* src, bool inverse) { + static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); + static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); + static auto cvInitMatHeader = reinterpret_cast(so->get_symbol("cvInitMatHeader")); + static auto cvGetRawData = reinterpret_cast(so->get_symbol("cvGetRawData")); + static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); + + + // tl | tr br | bl + // ---+--- -> ---+--- + // bl | br tr | tl + + float* data; + int step; + CvSize size; + cvGetRawData(src, (uchar**)&data, &step, &size); + + int height = size.height; + int width = size.width; + int h2 = height / 2; + int w2 = width / 2; + + if (height % 2 || width % 2) { + // Swap rows. + CvMat* srcTop = new CvMat(); + CvMat* srcBot = new CvMat(); + CvMat* dstTop = new CvMat(); + CvMat* dstBot = new CvMat(); + int topH = inverse ? h2 : (h2 + height % 2); + int botH = height - topH; + cvInitMatHeader(srcTop, topH, width, CV_32FC2, data, step); + cvInitMatHeader(srcBot, botH, width, CV_32FC2, data + topH * width * 2, step); + cvInitMatHeader(dstTop, topH, width, CV_32FC2, data + botH * width * 2, step); + cvInitMatHeader(dstBot, botH, width, CV_32FC2, data, step); + + CvMat* tmp = cvCloneMat(srcTop); + cvCopy(srcBot, dstBot, 0); + cvCopy(tmp, dstTop, 0); + + cvReleaseMat(&tmp); + delete srcTop; + delete srcBot; + delete dstTop; + delete dstBot; + + // Swap columns. + CvMat* srcL = new CvMat(); + CvMat* srcR = new CvMat(); + CvMat* dstL = new CvMat(); + CvMat* dstR = new CvMat(); + int leftW = inverse ? 
w2 : (w2 + width % 2); + int rightW = width - leftW; + + cvInitMatHeader(srcL, height, leftW, CV_32FC2, data, step); + cvInitMatHeader(srcR, height, rightW, CV_32FC2, data + leftW * 2, step); + cvInitMatHeader(dstL, height, leftW, CV_32FC2, data + rightW * 2, step); + cvInitMatHeader(dstR, height, rightW, CV_32FC2, data, step); + + tmp = cvCloneMat(srcL); + cvCopy(srcR, dstR, 0); + cvCopy(tmp, dstL, 0); + + cvReleaseMat(&tmp); + delete srcL; + delete srcR; + delete dstL; + delete dstR; + + return; + } + + CvMat* tl = new CvMat(); + CvMat* tr = new CvMat(); + CvMat* bl = new CvMat(); + CvMat* br = new CvMat(); + + cvInitMatHeader(tl, h2, w2, CV_32FC2, data, step); + cvInitMatHeader(tr, h2, w2, CV_32FC2, data + width, step); + cvInitMatHeader(bl, h2, w2, CV_32FC2, data + height * width, step); + cvInitMatHeader(br, h2, w2, CV_32FC2, data + height * width + width, step); + + CvArr* mask = 0; + CvMat* tmp = cvCloneMat(tl); + cvCopy(br, tl, mask); + cvCopy(tmp, br, mask); + + cvCopy(tr, tmp, mask); + cvCopy(bl, tr, mask); + cvCopy(tmp, bl, mask); + + cvReleaseMat(&tmp); + + delete tl; + delete tr; + delete bl; + delete br; +} + +//! [op:ctor] +FFT::FFT(const ov::Output& inp, + const ov::Output& dims, + bool inverse, + bool centered) : Op({inp, dims}) { + loadOpenCV(); + constructor_validate_and_infer_types(); + this->inverse = inverse; + this->centered = centered; +} +//! [op:ctor] + +//! [op:validate] +void FFT::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr FFT::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), inverse, centered); +} +//! [op:copy] + +//! [op:visit_attributes] +bool FFT::visit_attributes(ov::AttributeVisitor& visitor) { + int inverse_i = static_cast(inverse); + int centered_i = static_cast(centered); + visitor.on_attribute("inverse", inverse_i); + visitor.on_attribute("centered", centered_i); + inverse = static_cast(inverse_i); + centered = static_cast(centered_i); + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool FFT::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + static auto cvSetData = reinterpret_cast(so->get_symbol("cvSetData")); + static auto cvCreateMatHeader = reinterpret_cast(so->get_symbol("cvCreateMatHeader")); + static auto cvDFT = reinterpret_cast(so->get_symbol("cvDFT")); + static auto cvScale = reinterpret_cast(so->get_symbol("cvConvertScale")); + static auto cvReleaseMat = reinterpret_cast(so->get_symbol("cvReleaseMat")); + static auto cvReshape = reinterpret_cast(so->get_symbol("cvReshape")); + static auto cvCloneMat = reinterpret_cast(so->get_symbol("cvCloneMat")); + static auto cvCreateData = reinterpret_cast(so->get_symbol("cvCreateData")); + static auto cvReleaseData = reinterpret_cast(so->get_symbol("cvReleaseData")); + static auto cvCopy = reinterpret_cast(so->get_symbol("cvCopy")); + + float* inpData = reinterpret_cast(inputs[0].data()); + + if (inputs[1].get_element_type() != ov::element::i32) + IE_THROW() << "Unexpected dims type: " << inputs[1].get_element_type(); + + int32_t* signalDimsData = reinterpret_cast(inputs[1].data()); + float* outData = reinterpret_cast(outputs[0].data()); + std::vector dims = inputs[0].get_shape(); + const size_t numSignalDims = inputs[1].get_shape()[0]; + + if (!(dims.size() == 3 && (numSignalDims == 1 && signalDimsData[0] == 1) || + dims.size() == 4 && ((numSignalDims == 1 && signalDimsData[0] == 1) || + (numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2)) || + dims.size() == 5 && ((numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) || + (numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3)))) { + std::ostringstream ss; + for (size_t i = 0; i < numSignalDims; ++i) + ss << signalDimsData[i] << " "; + IE_THROW() << "Unsupported configuration: Input dims " << dims.size() << " and signal dims " << ss.str(); + } + + const int batch = dims[0]; + + if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { + const int channels = dims[1]; + int rows = dims[2]; + int cols = dims[3]; + const int planeSize = channels * rows * cols; + // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { + for (size_t d = 0; d < batch * cols; ++d) { + int b = d / cols; + int col = d % cols; + // Copy a slice from input + CvMat* inpSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); + CvMat* outSlice = cvCreateMatHeader(channels * rows, 1, CV_32FC2); + cvSetData(inpSlice, reinterpret_cast(inpData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); + cvSetData(outSlice, reinterpret_cast(outData + (b * planeSize + col) * 2), cols * 2 * sizeof(float)); + + CvMat* inp_col = cvCloneMat(inpSlice); + + CvMat inp_header, *inp; + inp = cvReshape(inp_col, &inp_header, 2, channels); + + CvMat* out = cvCreateMatHeader(channels, rows, CV_32FC2); + cvCreateData(out); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(channels * rows), 0); + + if (centered) + fftshift(out, false); + + CvMat out_col_header, *out_col; + out_col = cvReshape(out, &out_col_header, 2, channels * rows); + + cvCopy(out_col, outSlice, 0); + + cvReleaseData(inp_col); + cvReleaseMat(&inp_col); + + cvReleaseData(out); + cvReleaseMat(&out); + + cvReleaseMat(&inpSlice); + cvReleaseMat(&outSlice); + } + } else if (dims.size() == 5 && numSignalDims == 2 && signalDimsData[0] == 2 && signalDimsData[1] == 3) { + const int channels = 
dims[1]; + int rows = dims[2]; + int cols = dims[3]; + int planeSize = rows * cols * 2; // 2 is last dimension size + // InferenceEngine::parallel_for(batch * channels, [&](size_t d) { + for (size_t d = 0; d < batch * channels; ++d) { + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 4 && numSignalDims == 2 && signalDimsData[0] == 1 && signalDimsData[1] == 2) { + int rows = dims[1]; + int cols = dims[2]; + int planeSize = rows * cols * 2; // 2 is last dimension size + // InferenceEngine::parallel_for(batch, [&](size_t d) { + for (size_t d = 0; d < batch; ++d) { + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + d * planeSize), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + d * planeSize), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(cols * rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 4 && numSignalDims == 1 && signalDimsData[0] == 1) { + int rows = dims[1]; + int cols = dims[2]; + + const int planeSize = rows; + // InferenceEngine::parallel_for(batch * cols, [&](size_t d) { + for (size_t d = 0; d < batch * cols; ++d) { + int b = d / cols; + int col = d % cols; + CvMat* inp = cvCreateMatHeader(rows, 1, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, 1, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData + (b * planeSize * cols + col) * 2), cols * 2 * sizeof(float)); + + if (centered) + fftshift(inp, true); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD, 0); + cvScale(out, out, 1.0 / sqrtf(rows), 0); + + if (centered) + fftshift(out, false); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + } else if (dims.size() == 3) { + int rows = dims[0]; + int cols = dims[1]; + CvMat* inp = cvCreateMatHeader(rows, cols, CV_32FC2); + CvMat* out = cvCreateMatHeader(rows, cols, CV_32FC2); + cvSetData(inp, reinterpret_cast(inpData), cols * 2 * sizeof(float)); + cvSetData(out, reinterpret_cast(outData), cols * 2 * sizeof(float)); + + if (inverse) + cvDFT(inp, out, CV_DXT_INVERSE | CV_DXT_ROWS, 0); + else + cvDFT(inp, out, CV_DXT_FORWARD | CV_DXT_ROWS, 0); + cvScale(out, out, 1.0 / sqrtf(cols), 0); + + cvReleaseMat(&inp); + cvReleaseMat(&out); + } + return true; +} + +bool FFT::has_evaluate() const { + return true; +} +//! 
[op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp new file mode 100644 index 00000000000..f541f91c66d --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/fft.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! [op:header] +namespace TemplateExtension { + +class FFT : public ov::op::Op { +public: + OPENVINO_OP("FFT"); + + FFT() = default; + FFT(const ov::Output& inp, + const ov::Output& dims, + bool inverse, + bool centered); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; + +private: + bool inverse = false; + bool centered = false; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp new file mode 100644 index 00000000000..94fa44336f2 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.cpp @@ -0,0 +1,125 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "grid_sample.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +GridSample::GridSample(const ov::Output& inp, + const ov::Output& grid) : Op({inp, grid}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void GridSample::validate_and_infer_types() { + auto outShape = get_input_partial_shape(0); // NC + // Grid input has a shape NxHxWx2 + auto gridShape = get_input_partial_shape(1); + outShape[2] = gridShape[1]; // H + outShape[3] = gridShape[2]; // W + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr GridSample::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 2, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool GridSample::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool GridSample::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* inpData = reinterpret_cast(inputs[0].data()); + const float* gridData = reinterpret_cast(inputs[1].data()); + float* outData = reinterpret_cast(outputs[0].data()); + + std::vector inpDims = inputs[0].get_shape(); + std::vector outDims = outputs[0].get_shape(); + + const int batch = outDims[0]; + const int channels = outDims[1]; + const int height = outDims[2]; + const int width = outDims[3]; + const int inpHeight = inpDims[2]; + const int inpWidth = inpDims[3]; + const int inpPlane = inpHeight * inpWidth; + const int outPlane = height * width; + + std::vector zerosPlane(inpDims[1] * inpDims[2] * inpDims[3], 0); + float* zeros = zerosPlane.data(); + + // InferenceEngine::parallel_for(batch, [&](int d) { + for (int d = 0; d < batch; ++d) { + const float* inp = inpData + d * channels * inpPlane; + const float* grid = gridData + d * outPlane * 2; + for (int y = 0; y < height; ++y) { + for (int x = 0; x < width; ++x) { + int offset = y * width + x; + + float input_x = 0.5f * (grid[offset * 2] + 1) * (inpWidth - 1); + int x0 = std::floor(input_x); + int x1 = x0 + 1; + + float input_y = 0.5f * (grid[offset * 2 + 1] + 1) * (inpHeight - 1); + int y0 = std::floor(input_y); + int y1 = y0 + 1; + + const float* inp_row0 = (0 <= y0 && y0 < inpHeight) ? inp + y0 * inpWidth : zeros; + const float* inp_row1 = (0 <= y1 && y1 < inpHeight) ? inp + y1 * inpWidth : zeros; + float* out = outData + d * channels * outPlane; + if ((x1 < 0 || inpWidth <= x1) && (x0 < 0 || inpWidth <= x0)) { + for (int c = 0; c < channels; ++c) { + out[offset] = 0; + out += outPlane; + } + } + else if (x1 < 0 || inpWidth <= x1) { + for (int c = 0; c < channels; ++c) { + out[offset] = inp_row0[x0] + + (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + + (input_x - x0) * (-inp_row0[x0] + + (input_y - y0) * (inp_row0[x0] - inp_row1[x0])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } + else if (x0 < 0 || inpWidth <= x0) { + for (int c = 0; c < channels; ++c) { + out[offset] = + (input_x - x0) * (inp_row0[x1] + (input_y - y0) * (inp_row1[x1] - inp_row0[x1])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } else { + for (int c = 0; c < channels; ++c) { + out[offset] = inp_row0[x0] + + (input_y - y0) * (inp_row1[x0] - inp_row0[x0]) + + (input_x - x0) * (inp_row0[x1] - inp_row0[x0] + + (input_y - y0) * (inp_row1[x1] - inp_row0[x1] - inp_row1[x0] + inp_row0[x0])); + out += outPlane; + inp_row0 += inpPlane; + inp_row1 += inpPlane; + } + } + } + } + } + return true; +} + +bool GridSample::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp new file mode 100644 index 00000000000..be259717045 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/grid_sample.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! 
[op:header] +namespace TemplateExtension { + +class GridSample : public ov::op::Op { +public: + OPENVINO_OP("GridSample"); + + GridSample() = default; + GridSample(const ov::Output& inp, + const ov::Output& grid); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp new file mode 100644 index 00000000000..e048e5f08cd --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/ov_extension.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "unpool.hpp" +#include "sparse_conv.hpp" +#include "sparse_conv_transpose.hpp" +#include "complex_mul.hpp" +#include "calculate_grid.hpp" +#include "grid_sample.hpp" +#include "fft.hpp" + +// clang-format off +//! [ov_extension:entry_point] +OPENVINO_CREATE_EXTENSIONS( + std::vector({ + // Register operation itself, required to be read from IR + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + + // Register operaton mapping, required when converted from framework model format + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>(), + std::make_shared>() + })); +//! [ov_extension:entry_point] +// clang-format on diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp new file mode 100644 index 00000000000..dd561626c43 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "sparse_conv.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +SparseConv::SparseConv(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void SparseConv::validate_and_infer_types() { + auto outShape = get_input_partial_shape(2); + auto kernelShape = get_input_partial_shape(3); + outShape[1] = kernelShape[4]; + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr SparseConv::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool SparseConv::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! 
[op:visit_attributes] + +//! [op:evaluate] +bool SparseConv::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* features = reinterpret_cast(inputs[0].data()); + const float* inpPos = reinterpret_cast(inputs[1].data()); + const float* outPos = reinterpret_cast(inputs[2].data()); + const float* kernel = reinterpret_cast(inputs[3].data()); + const float* offset = reinterpret_cast(inputs[4].data()); + float* out = reinterpret_cast(outputs[0].data()); + memset(out, 0, outputs[0].get_byte_size()); + + size_t numInpPoints = inputs[1].get_shape()[0]; + const size_t numOutPoints = inputs[2].get_shape()[0]; + std::vector kernelDims = inputs[3].get_shape(); + + // Kernel layout is DxHxWxICxOH + const int kd = kernelDims[0]; + const int kh = kernelDims[1]; + const int kw = kernelDims[2]; + const int IC = kernelDims[3]; + const int OC = kernelDims[4]; + + // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py + float rw = kw * 0.51f; + float rh = kh * 0.51f; + float rd = kd * 0.51f; + + for (size_t i = 0; i < numInpPoints; ++i) { + if (inpPos[i * 3] < 0) { + numInpPoints = i; + break; + } + } + + for (size_t i = 0; i < numOutPoints; ++i) { + const float xi = outPos[i * 3] - offset[0]; + const float yi = outPos[i * 3 + 1] - offset[1]; + const float zi = outPos[i * 3 + 2] - offset[2]; + + // Accumulate features which inside the kernel + for (size_t j = 0; j < numInpPoints; ++j) { + const float xj = inpPos[j * 3]; + const float yj = inpPos[j * 3 + 1]; + const float zj = inpPos[j * 3 + 2]; + + if (xi - rw <= xj && xj <= xi + rw && + yi - rh <= yj && yj <= yi + rh && + zi - rd <= zj && zj <= zi + rd) { + + const int w = std::min(static_cast(xj - xi + kw * 0.5f), kw - 1); + const int h = std::min(static_cast(yj - yi + kh * 0.5f), kh - 1); + const int d = std::min(static_cast(zj - zi + kd * 0.5f), kd - 1); + + const float* featuresOffset = features + j * IC; + for (size_t ic = 0; ic < IC; ++ic) { + const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d))); + for (size_t oc = 0; oc < OC; ++oc) { + out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic]; + } + } + } + } + } + return true; +} + +bool SparseConv::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp new file mode 100644 index 00000000000..8d508e725aa --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! 
[op:header] +namespace TemplateExtension { + +class SparseConv : public ov::op::Op { +public: + OPENVINO_OP("SparseConv"); + + SparseConv() = default; + SparseConv(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp new file mode 100644 index 00000000000..dfd8d525116 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.cpp @@ -0,0 +1,109 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "sparse_conv_transpose.hpp" + +using namespace TemplateExtension; + +//! [op:ctor] +SparseConvTranspose::SparseConvTranspose(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset) : Op({features, inp_pos, out_pos, kernel, offset}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void SparseConvTranspose::validate_and_infer_types() { + auto outShape = get_input_partial_shape(2); + auto kernelShape = get_input_partial_shape(3); + outShape[1] = kernelShape[4]; + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr SparseConvTranspose::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 5, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool SparseConvTranspose::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool SparseConvTranspose::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* features = reinterpret_cast(inputs[0].data()); + const float* inpPos = reinterpret_cast(inputs[1].data()); + const float* outPos = reinterpret_cast(inputs[2].data()); + const float* kernel = reinterpret_cast(inputs[3].data()); + const float* offset = reinterpret_cast(inputs[4].data()); + float* out = reinterpret_cast(outputs[0].data()); + memset(out, 0, outputs[0].get_byte_size()); + + size_t numInpPoints = inputs[1].get_shape()[0]; + const size_t numOutPoints = inputs[2].get_shape()[0]; + std::vector kernelDims = inputs[3].get_shape(); + + // Kernel layout is DxHxWxICxOH + const int kd = kernelDims[0]; + const int kh = kernelDims[1]; + const int kw = kernelDims[2]; + const int IC = kernelDims[3]; + const int OC = kernelDims[4]; + + // See https://github.com/isl-org/Open3D/blob/master/python/open3d/ml/torch/python/layers/convolutions.py + float rw = kw * 0.51f; + float rh = kh * 0.51f; + float rd = kd * 0.51f; + + for (size_t i = 0; i < numInpPoints; ++i) { + if (inpPos[i * 3] < 0) { + numInpPoints = i; + break; + } + } + + for (size_t i = 0; i < numOutPoints; ++i) { + const float xi = outPos[i * 3] - offset[0]; + const float yi = outPos[i * 3 + 1] - offset[1]; + const float zi = outPos[i * 3 + 2] - offset[2]; + + // Accumulate features which inside the kernel + for (size_t j = 0; j < numInpPoints; ++j) { + const float xj = inpPos[j * 3]; + const float yj = inpPos[j * 3 + 1]; + const float zj = inpPos[j * 3 + 2]; + + if (xi - rw <= xj && xj <= xi + rw && + yi - rh <= yj && yj <= yi + rh && + zi - rd <= zj && zj <= zi + rd) { + + const int w = kw - 1 - std::min(static_cast(xj - xi + kw * 0.5f), kw - 1); + const int h = kh - 1 - std::min(static_cast(yj - yi + kh * 0.5f), kh - 1); + const int d = kd - 1 - std::min(static_cast(zj - zi + kd * 0.5f), kd - 1); + + const float* featuresOffset = features + j * IC; + for (size_t ic = 0; ic < IC; ++ic) { + const float* kernelOffset = kernel + OC * (ic + IC * (w + kw * (h + kh * d))); + for (size_t oc = 0; oc < OC; ++oc) { + out[i * OC + oc] += kernelOffset[oc] * featuresOffset[ic]; + } + } + } + } + } + return true; +} + +bool SparseConvTranspose::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp new file mode 100644 index 00000000000..f607d5462df --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/sparse_conv_transpose.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] + +//! 
[op:header] +namespace TemplateExtension { + +class SparseConvTranspose : public ov::op::Op { +public: + OPENVINO_OP("SparseConvTranspose"); + + SparseConvTranspose() = default; + SparseConvTranspose(const ov::Output& features, + const ov::Output& inp_pos, + const ov::Output& out_pos, + const ov::Output& kernel, + const ov::Output& offset); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! [op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp new file mode 100644 index 00000000000..caae15055fb --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "unpool.hpp" +// #include + +using namespace TemplateExtension; + +//! [op:ctor] +Unpool::Unpool(const ov::Output& poolInp, + const ov::Output& poolOut, + const ov::Output& inp, + const ov::Output& shape) : Op({poolInp, poolOut, inp, shape}) { + constructor_validate_and_infer_types(); +} +//! [op:ctor] + +//! [op:validate] +void Unpool::validate_and_infer_types() { + auto outShape = get_input_partial_shape(3); + auto poolInpShape = get_input_partial_shape(0).to_shape(); + outShape[0] = poolInpShape[0]; // Use only spatial dimensions from shape + outShape[1] = poolInpShape[1]; // and restore batch and channels + set_output_type(0, get_input_element_type(0), outShape); +} +//! [op:validate] + +//! [op:copy] +std::shared_ptr Unpool::clone_with_new_inputs(const ov::OutputVector& new_args) const { + OPENVINO_ASSERT(new_args.size() == 4, "Incorrect number of new arguments"); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3)); +} +//! [op:copy] + +//! [op:visit_attributes] +bool Unpool::visit_attributes(ov::AttributeVisitor& visitor) { + return true; +} +//! [op:visit_attributes] + +//! 
[op:evaluate] +bool Unpool::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const { + const float* poolInp = reinterpret_cast(inputs[0].data()); + const float* poolOut = reinterpret_cast(inputs[1].data()); + const float* inp = reinterpret_cast(inputs[2].data()); + float* out = reinterpret_cast(outputs[0].data()); + + std::vector poolInpDims = inputs[0].get_shape(); + std::vector poolOutDims = inputs[1].get_shape(); + std::vector inpDims = inputs[2].get_shape(); + std::vector outDims = outputs[0].get_shape(); + + const size_t batch = poolInpDims[0]; + const size_t channels = poolInpDims[1]; + const size_t height = poolInpDims[2]; + const size_t width = poolInpDims[3]; + const size_t outHeight = outDims[2]; + const size_t outWidth = outDims[3]; + const size_t poolOutHeight = poolOutDims[2]; + const size_t poolOutWidth = poolOutDims[3]; + + std::vector mask(inputs[1].get_size(), false); + + memset(out, 0, outputs[0].get_byte_size()); + // InferenceEngine::parallel_for(batch*channels, [&](size_t d) { + for (size_t d = 0; d < batch * channels; ++d) { + for (int y = 0; y < height; ++y) { + for (int x = 0; x < width; ++x) { + int poolOutIdx = (d * poolOutHeight + y / 2) * poolOutWidth + x / 2; + int poolInpIdx = (d * height + y) * width + x; + int dstIdx = d * outHeight * outWidth + (y * width + x); + if (fabs(poolInp[poolInpIdx] - poolOut[poolOutIdx]) < 1e-5f && !mask[poolOutIdx]) { + out[dstIdx] = inp[poolOutIdx]; + mask[poolOutIdx] = true; + } + } + } + } + return true; +} + +bool Unpool::has_evaluate() const { + return true; +} +//! [op:evaluate] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp new file mode 100644 index 00000000000..275a80c23f7 --- /dev/null +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/openvino_pytorch_layers/user_ie_extensions/unpool.hpp @@ -0,0 +1,41 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +//! [op:common_include] +#include +//! [op:common_include] +//! [op:frontend_include] +#ifdef OPENVINO_ONNX_FRONTEND_ENABLED +# include +#endif +//! [op:frontend_include] + +//! [op:header] +namespace TemplateExtension { + +class Unpool : public ov::op::Op { +public: + OPENVINO_OP("MaxPoolGrad"); + +#ifdef OPENVINO_ONNX_FRONTEND_ENABLED + OPENVINO_FRAMEWORK_MAP(onnx, "MaxPoolGrad") +#endif + + Unpool() = default; + Unpool(const ov::Output& poolInp, + const ov::Output& poolOut, + const ov::Output& inp, + const ov::Output& shape); + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; + bool visit_attributes(ov::AttributeVisitor& visitor) override; + + bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override; + bool has_evaluate() const override; +}; +//! 
[op:header] + +} // namespace TemplateExtension diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py index 22fe88c2612..769f70b5b3b 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py @@ -190,7 +190,7 @@ def train_network(config): if validDice_lungs.cpu() > bestValidDice_lungs: bestValidDice_lungs = validDice_lungs.cpu() - torch.save(net.state_dict(), save_path+'sumnet_best_lungs.pt') + torch.save(net.state_dict(), save_path+'/sumnet_best_lungs.pt') plot_graphs(train_values=trainLoss, valid_values=validLoss, save_path=save_path, x_label='Epochs', y_label='Loss', @@ -206,10 +206,10 @@ def train_network(config): print('Saving losses') - torch.save(trainLoss, save_path+'trainLoss.pt') - torch.save(validLoss, save_path+'validLoss.pt') - torch.save(trainDiceCoeff_lungs, save_path+'trainDice_lungs.pt') - torch.save(validDiceCoeff_lungs, save_path+'validDice_lungs.pt') + torch.save(trainLoss, save_path+'/trainLoss.pt') + torch.save(validLoss, save_path+'/validLoss.pt') + torch.save(trainDiceCoeff_lungs, save_path+'/trainDice_lungs.pt') + torch.save(validDiceCoeff_lungs, save_path+'/validDice_lungs.pt') # if epoch>1: # break diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py index 18066837aee..901dda3d09f 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py @@ -49,10 +49,10 @@ def lungpatch_classifier(config): save_path = save_path if not os.path.isdir(save_path): os.makedirs(save_path) + use_gpu = torch.cuda.is_available() net = LeNet() - use_gpu = torch.cuda.is_available() if use_gpu: net = net.cuda() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index ce25e69817b..ceb1dea3f15 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -4,7 +4,9 @@ import torch import json from .models import LeNet, R2U_Net, SUMNet, U_Net - +from openvino.inference_engine import IECore +import onnx +import onnxruntime def ch_shuffle(x): shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) @@ -56,6 +58,7 @@ def load_checkpoint(model, checkpoint): model.load_state_dict(model_checkpoint) else: model.state_dict() + return model def plot_graphs( train_values, valid_values, @@ -72,6 +75,18 @@ def plot_graphs( plt.savefig(os.path.join(save_path, save_name)) plt.close() +def load_inference_model(config, run_type): + + if run_type == 'onnx': + model = onnxruntime.InferenceSession(config['onnx_checkpoint']) + else: + ie = IECore() + model_xml = os.path.splitext(config['onnx_checkpoint'])[0] + ".xml" + model_bin = os.path.splitext(model_xml)[0] + ".bin" + model_temp = ie.read_network(model_xml, model_bin) + model = ie.load_network(network=model_temp, device_name='CPU') + return model + def create_dummy_json_file(json_path,stage): test_data_path = os.path.split(json_path)[0] if stage == 1: @@ -79,9 +94,10 @@ def create_dummy_json_file(json_path,stage): else: img_path = os.path.join(test_data_path,'stage2','img') file_list = os.listdir(img_path) - train_list = file_list[:7] - valid_list = file_list[7:10] - test_list = file_list[10:15] + 
no_files = len(file_list) + train_list = file_list[:int(0.6*no_files)] + valid_list = file_list[int(0.6*no_files):int(0.8*no_files)] + test_list = file_list[int(0.8*no_files):no_files] dummy_dict = { "train_set":train_list, "valid_set": valid_list, diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py index f8a7c447ff5..84e4d51049f 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_export.py @@ -10,7 +10,7 @@ class ExportTestStage1(unittest.TestCase): def setUpClass(cls): cls.config = get_config(action='export', stage=1) if not os.path.exists(cls.config['checkpoint']): - download_checkpoint(stage=1) + download_checkpoint() cls.model_path = cls.config['checkpoint'] def test_export_onnx(self): @@ -33,7 +33,6 @@ def test_export_ir(self): self.assertTrue(bin_status) def test_config(self): - self.config = get_config(action='export', stage=1) self.model_path = self.config['checkpoint'] self.input_shape = self.config['input_shape'] self.output_dir = os.path.split(self.model_path)[0] @@ -49,13 +48,10 @@ class ExportTestStage2(unittest.TestCase): def setUpClass(cls): cls.config = get_config(action='export', stage=2) if not os.path.exists(cls.config['checkpoint']): - download_checkpoint(stage=2) + download_checkpoint() cls.model_path = cls.config['checkpoint'] def test_export_onnx(self): - self.config = get_config(action='export', stage=2) - if not os.path.exists(self.config['checkpoint']): - download_checkpoint(stage=2) self.exporter = Exporter(self.config, stage=2) self.exporter.export_model_onnx() checkpoint = os.path.split(self.config['checkpoint'])[0] @@ -63,9 +59,6 @@ def test_export_onnx(self): checkpoint, self.config.get('model_name_onnx'))) def test_export_ir(self): - self.config = get_config(action='export', stage=2) - if not os.path.exists(self.config['checkpoint']): - download_checkpoint(stage=2) self.exporter = Exporter(self.config, stage=2) self.model_path = os.path.split(self.config['checkpoint'])[0] if not os.path.exists(os.path.join(self.model_path, self.config.get('model_name_onnx'))): @@ -81,7 +74,6 @@ def test_export_ir(self): self.assertTrue(bin_status) def test_config(self): - self.config = get_config(action='export', stage=2) self.model_path = self.config['checkpoint'] self.input_shape = self.config['input_shape'] self.output_dir = os.path.split(self.model_path)[0] diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py index e69de29bb2d..af738c7e99d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_inference.py @@ -0,0 +1,66 @@ +import os +import unittest +from src.utils.get_config import get_config +from src.utils.infer_stage1 import infer_lungseg +from src.utils.infer_stage2 import lungpatch_classifier +from src.utils.downloader import download_data +from src.utils.utils import create_dummy_json_file + +def create_inference_test_for_stage1(): + class InferenceTestStage1(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='inference', stage=1) + if not os.path.exists(cls.config['data_path']): + download_data() + if not os.path.exists(cls.config["json_path"]): + create_dummy_json_file(cls.config["json_path"], stage=1) + + def test_pytorch_inference(self): + dice = 
infer_lungseg(self.config, run_type='pytorch') + self.assertGreater(dice, 0) + + # def test_onnx_inference(self): + # dice = infer_lungseg(self.config, run_type='onnx') + # self.assertGreater(dice, 0) + + def test_ir_inference(self): + dice = infer_lungseg(self.config, run_type='ir') + self.assertGreaterEqual(dice, 0) + + return InferenceTestStage1 + +def create_inference_test_for_stage2(): + class InferenceTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='inference', stage=2) + + if not os.path.exists(cls.config['imgpath']): + download_data() + if not os.path.exists(cls.config["jsonpath"]): + create_dummy_json_file(cls.config["jsonpath"], stage=2) + def test_pytorch_inference(self): + accuracy = lungpatch_classifier(self.config, run_type='pytorch') + self.assertGreater(accuracy, 80) + + def test_onnx_inference(self): + accuracy = lungpatch_classifier(self.config, run_type='pytorch') + self.assertGreater(accuracy, 80) + + def test_ir_inference(self): + accuracy = lungpatch_classifier(self.config, run_type='pytorch') + self.assertGreater(accuracy, 80) + + return InferenceTest + + +class TestTrainer(create_inference_test_for_stage1()): + 'Test case for stage1' + +class TestTrainerEff(create_inference_test_for_stage2()): + 'Test case for stage2' + +if __name__ == '__main__': + + unittest.main() From 049c4d3c154485f24fbbafd96caff4798098e415 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Sat, 14 Jan 2023 16:55:59 +0530 Subject: [PATCH 08/47] updated readme --- .../lung_nodule_detection/ReadMe.md | 75 +++++++++++++++++-- .../lung_nodule_detection/src/inference.py | 14 ++-- .../lung_nodule_detection/tests/test_train.py | 2 +- 3 files changed, 74 insertions(+), 17 deletions(-) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md index c49dc158496..d3c88c9824d 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md +++ b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md @@ -32,18 +32,69 @@ The ground truth annotations were marked in a two-phase image annotation process >**License**: Both the datasets are published by the creators under [Creative Commons Attribution 3.0 Unported License](https://creativecommons.org/licenses/by/3.0/) -# Using the code -## Code Organization -Code directory is organised into 3 subfolders; Data preparation, Training and Evaluation. Each of these subfolders has a .py file and a package folder containing function definitions. -## Requirements +## Code and Directory Organization + +lung_nodule_detection/ + src/ + utils/ + data_prep/ + create_folds.py + generate_patches.py + generate_slices.py + visualize.py + downloader.py + data_loader.py + exporter.py + get_config.py + models.py + infer_stage1.py + infer_stage2.py + train_stage1.py + train_stage2.py + utils.py + export.py + inference.py + train.py + prepare_data.py + configs/ + stage1_config.json + stage2_config.json + download_config.json + media/ + tests/ + test_export.py + test_inference.py + test_train.py + init_venv.sh + README.md + requirements.txt + setup.py + +## System Specifications + +The code and models were tested on system with the following hardware and software specifications. 
+- Ubuntu* 16.04 +- Python* 3.6 +- NVidia* GPU for training +- 16GB RAM for inference + +# Using the code +## Creating Virtual Environment Create a virtual environment with all dependencies using ``` sh init_venv.sh ``` - -## Data preparation +The network used in stage 1, lung segmentation relies on MaxUnpool2D operation. This operation is not supported for ONNX and IR conversions in its current versions. As a work around we use [openvino_pytorch_layers](https://github.com/dkurt/openvino_pytorch_layers) and [max_unpool2d_decomposition.py](https://github.com/openvinotoolkit/openvino/pull/11400/files). + +After creating the virtual environment, users need to keep the `openvino_pytorch_layers` in the `src/utils/` directory and `max_unpool2d_decomposition.py` in `venv/lib/python3.9/site-packages/openvino/tools/mo/front/onnx/` directory. + +Thanks to [@dkurt](https://github.com/dkurt) for the solution. Users can follow the entire discussion [here](https://github.com/dkurt/openvino_pytorch_layers/issues/40). + +>**Note** This is needed only for exporting and inferencing the ONNX and IR model. + +## Data Preparation Follow the below steps to prepare and organise the data for training. > Details about the arguments being passed and its purpose is explained within the code. To see the details run `python prepare_data.py -h` @@ -88,8 +139,14 @@ To evaluate the classifier network execute `python inference.py --patchclass --savepath --imgpath ` ## Pre-trained Models -## Results +Pretrained models for inference are available [here](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi11/model_weights.zip). Users can also use the `downloader.py` script in utils directory to download the model. + +### Run Tests + +Necessary unit tests have been provided in the tests directory. The sample/toy dataset to be used in the tests can also be downloaded from [here]http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi11/test_data.zip). + +>**Note**: Unit tests for inference using ONNX model is commented/disabled at the moment as MaxUnpool2D operation is yet to be supported in onnxruntime. ## Acknowledgement @@ -117,3 +174,7 @@ email: rakshith.sathish@kgpian.iitkgp.ac.in
Github username: Rakshith2597 ## References + +
+[1] R. Sathish, R. Sathish, R. Sethuraman, D. Sheet. Lung Segmentation and Nodule Detection in Computed Tomography Scan using a Convolutional Neural Network Trained Adversarially using Turing Test Loss. In Proceedings of the 42nd Annual International Conference of the IEEE Engineering in Medicine & Biology Society (EMBC), 2020. (link)
+
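The export and inference steps above leave ONNX and IR files next to the downloaded checkpoint, and `load_inference_model` in `src/utils/utils.py` opens the IR pair through OpenVINO's `IECore`. A minimal sketch of that flow follows; the `.xml`/`.bin` paths and the 1x1x512x512 input are illustrative assumptions (the stage-1 data loader works on 512-pixel single-channel slices), not values fixed by the patch.

```
# Sketch: run an exported IR model on CPU with the same IECore API that
# load_inference_model() in src/utils/utils.py relies on.
# The .xml/.bin paths and the input shape below are assumptions for illustration.
import numpy as np
from openvino.inference_engine import IECore

def run_ir(xml_path, bin_path, batch):
    ie = IECore()
    net = ie.read_network(model=xml_path, weights=bin_path)
    exec_net = ie.load_network(network=net, device_name='CPU')
    input_name = next(iter(net.input_info))
    output_name = next(iter(net.outputs))
    return exec_net.infer(inputs={input_name: batch})[output_name]

if __name__ == '__main__':
    dummy_slice = np.random.rand(1, 1, 512, 512).astype(np.float32)
    mask = run_ir('model_weights/stage1.xml', 'model_weights/stage1.bin', dummy_slice)
    print(mask.shape)
```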
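The `openvino_pytorch_layers` workaround described above is needed because the stage-1 segmentation network restores feature maps with MaxUnpool2D using the indices saved during pooling, and the `Unpool` (`MaxPoolGrad`) extension in this patch supplies the equivalent behaviour on the OpenVINO side. A toy PyTorch sketch of the pool/unpool pair being reproduced, with illustrative sizes:

```
# Toy illustration of the max-pool / max-unpool pair that the Unpool
# (MaxPoolGrad) extension mirrors: only the recorded max locations are
# restored, every other position comes back as zero.
import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 4, 4)
pooled, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)
restored = F.max_unpool2d(pooled, indices, kernel_size=2, stride=2)

print(pooled.shape, restored.shape)        # (1, 1, 2, 2) and (1, 1, 4, 4)
print(int(torch.count_nonzero(restored)))  # at most 4 non-zero values
```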
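The C++ `evaluate` kernels added under `user_ie_extensions` can be sanity-checked against NumPy. `ComplexMultiplication`, for instance, forms an element-wise complex product over values stored as interleaved real/imaginary pairs; the sketch below assumes an N x C x H x W x 2 layout read off the kernel's indexing, including the broadcast over a single-channel second input.

```
# NumPy reference for an element-wise complex product over tensors whose last
# axis stores (real, imaginary) pairs; the (N, C, H, W, 2) layout is assumed here.
import numpy as np

def complex_mul(x, y):
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    return np.stack([xr * yr - xi * yi, xr * yi + xi * yr], axis=-1)

a = np.random.rand(2, 3, 4, 4, 2).astype(np.float32)
b = np.random.rand(2, 1, 4, 4, 2).astype(np.float32)   # single-channel second input
out = complex_mul(a, b)

# Cross-check against NumPy's native complex arithmetic.
ref = (a[..., 0] + 1j * a[..., 1]) * (b[..., 0] + 1j * b[..., 1])
assert np.allclose(out[..., 0], ref.real, atol=1e-5)
assert np.allclose(out[..., 1], ref.imag, atol=1e-5)
print(out.shape)  # (2, 3, 4, 4, 2)
```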
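Likewise, the `FFT` kernel runs OpenCV's DFT and rescales by 1/sqrt(N) in both directions, i.e. an orthonormal transform, with optional `fftshift` centring. A NumPy sketch of that behaviour for a single 2-D plane, kept independent of the batch and channel handling in the kernel:

```
# Orthonormally scaled 2-D FFT with optional centring, sketched for one plane.
import numpy as np

def fft2_plane(plane, inverse=False, centered=False):
    data = np.fft.ifftshift(plane) if centered else plane
    out = np.fft.ifft2(data, norm='ortho') if inverse else np.fft.fft2(data, norm='ortho')
    return np.fft.fftshift(out) if centered else out

x = np.random.rand(8, 8) + 1j * np.random.rand(8, 8)
y = fft2_plane(x, centered=True)
x_back = fft2_plane(y, inverse=True, centered=True)
assert np.allclose(x, x_back)  # forward followed by inverse recovers the input
```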
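Finally, the `GridSample` kernel maps normalised grid coordinates with 0.5 * (g + 1) * (size - 1) and pads with zeros outside the input, which corresponds to PyTorch's bilinear `grid_sample` with `align_corners=True`; a short reference call for comparison:

```
# PyTorch reference call matching the kernel's coordinate mapping:
# bilinear sampling, zero padding outside the input, align_corners=True.
import torch
import torch.nn.functional as F

inp = torch.randn(1, 3, 8, 8)
grid = torch.rand(1, 5, 5, 2) * 2 - 1   # normalised (x, y) coordinates in [-1, 1]
out = F.grid_sample(inp, grid, mode='bilinear',
                    padding_mode='zeros', align_corners=True)
print(out.shape)                        # torch.Size([1, 3, 5, 5])
```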
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py index 227a4efb5d3..e9d27a59b9b 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py @@ -5,15 +5,9 @@ def main(config): if config["lungseg"]: - foldno = config["foldno"] - savepath = config["savepath"] - jsonpath = config["jsonpath"] - network = config["network"] - infer_stage1.infer_lungseg(foldno,savepath,network,jsonpath) + infer_stage1.infer_lungseg(config, run_type=config['run_type']) else: - savepath = config["savepath"] - imgpath = config["imgpath"] - infer_stage2.lungpatch_classifier(savepath,imgpath) + infer_stage2.lungpatch_classifier(config, run_type=config['run_type']) if __name__ == '__main__': @@ -32,7 +26,9 @@ def main(config): parser.add_argument('--imgpath', help='Folder location where test images are stored') parser.add_argument('--network', - help='Network to be trained') + help='Network to be trained') + parser.add_argument('--runtype', + help='Select runtype: {pytorch, onnx, ir}') args = parser.parse_args() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py index c3ea9e37915..57d60539b31 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/tests/test_train.py @@ -44,7 +44,7 @@ def setUpClass(cls): def test_trainer(self): self.model = LeNet() loss_list = lungpatch_classifier(self.config) - self.assertLessEqual(loss_list[2], loss_list[0]) + self.assertLessEqual(loss_list[1], loss_list[0]) def test_config(self): self.config = get_config(action='train', stage=2) From 04b3436337710f80f6d8d096b8b647ddc0d91a1c Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Sat, 14 Jan 2023 20:27:54 +0530 Subject: [PATCH 09/47] pylint fixes --- .../lung_nodule_detection/ReadMe.md | 2 +- .../lung_nodule_detection/src/export.py | 4 +- .../lung_nodule_detection/src/inference.py | 4 +- .../lung_nodule_detection/src/prepare_data.py | 4 +- .../src/utils/data_loader.py | 51 +-- .../src/utils/data_prep/create_folds.py | 133 ++++--- .../src/utils/data_prep/generate_patches.py | 367 +++++++++++------- .../src/utils/data_prep/generate_slices.py | 128 +++--- .../src/utils/downloader.py | 2 +- .../src/utils/infer_stage1.py | 105 +++-- .../src/utils/infer_stage2.py | 27 +- .../lung_nodule_detection/src/utils/models.py | 6 +- .../src/utils/train_stage1.py | 119 +++--- .../src/utils/train_stage2.py | 85 ++-- .../lung_nodule_detection/src/utils/utils.py | 73 ++-- 15 files changed, 599 insertions(+), 511 deletions(-) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md index d3c88c9824d..c5975e4e616 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md +++ b/misc/pytorch_toolkit/lung_nodule_detection/ReadMe.md @@ -142,7 +142,7 @@ To evaluate the classifier network execute Pretrained models for inference are available [here](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi11/model_weights.zip). Users can also use the `downloader.py` script in utils directory to download the model. -### Run Tests +## Run Tests Necessary unit tests have been provided in the tests directory. The sample/toy dataset to be used in the tests can also be downloaded from [here]http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi11/test_data.zip). 
diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/export.py b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py index a6a2e7f6235..d1f8c83a83b 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/export.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/export.py @@ -29,10 +29,10 @@ def export(configs): args = parser.parse_args() - configs = { + config = { "onnx": args.onnx, "ir": args.ir, "stage": args.stage } - export(configs) + export(config) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py index e9d27a59b9b..b2e1a3b0831 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/inference.py @@ -26,9 +26,9 @@ def main(config): parser.add_argument('--imgpath', help='Folder location where test images are stored') parser.add_argument('--network', - help='Network to be trained') + help='Network to be trained') parser.add_argument('--runtype', - help='Select runtype: {pytorch, onnx, ir}') + help='Select runtype: {pytorch, onnx, ir}') args = parser.parse_args() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py index 8342b206185..d0ff7f4523f 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/prepare_data.py @@ -94,6 +94,6 @@ def main(args): parser.add_argument('--seriesuid', help='Seriesuid of slice to visualize') - args=parser.parse_args() + arg=parser.parse_args() - main(args) + main(arg) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py index 6291f8f764d..b248cf8b2d3 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_loader.py @@ -5,6 +5,7 @@ from PIL import Image import numpy as np + class LungDataLoader(data.Dataset): """Class represents the dataloader for Lung segmentation task @@ -24,63 +25,65 @@ class LungDataLoader(data.Dataset): Size of input image """ - def __init__(self,datapath,lung_path,json_file,split="train_set",is_transform= True,img_size= 512): - self.split=split - self.path= datapath - self.lung_path=lung_path + def __init__(self, datapath, lung_path, json_file, split="train_set", is_transform=True, img_size=512): + + self.split = split + self.path = datapath + self.lung_path = lung_path self.json = json_file self.files = self.json[self.split] - self.img_size= img_size - self.is_transform= is_transform - self.image_tf= transforms.Compose( + self.img_size = img_size + self.is_transform = is_transform + self.image_tf = transforms.Compose( [transforms.Resize(self.img_size), transforms.ToTensor() - ]) + ]) self.lung_tf = transforms.Compose( [transforms.Resize(self.img_size), - transforms.ToTensor() - ]) + transforms.ToTensor() + ]) def __len__(self): return len(self.files) - def __getitem__(self,index): + def __getitem__(self, index): filename = self.files[index] img = Image.fromarray(np.load(self.path+'img/'+filename).astype(float)) - lung_mask = Image.fromarray(np.load(self.path+'mask/'+filename).astype(float)) + lung_mask = Image.fromarray( + np.load(self.path+'mask/'+filename).astype(float)) if self.is_transform: - img, lung_mask = self.transform(img,lung_mask) - labels = torch.cat((1.-lung_mask,lung_mask)) # + img, lung_mask = 
self.transform(img, lung_mask) + labels = torch.cat((1.-lung_mask, lung_mask)) return img, labels - def transform(self,img,lung_mask): + def transform(self, img, lung_mask): img = self.image_tf(img) img = img.type(torch.FloatTensor) lung_mask = self.lung_tf(lung_mask) lung_mask = lung_mask.type(torch.FloatTensor) - return img,lung_mask + return img, lung_mask class LungPatchDataLoader(data.Dataset): - def __init__(self,imgpath,json_file,split="train_set",is_transform= True): + def __init__(self, imgpath, json_file, split="train_set", is_transform=True): self.split = split self.json = json_file self.files = self.json[self.split] self.is_transform = is_transform self.imgpath = imgpath - + def __len__(self): return len(self.files) - def __getitem__(self,index): + def __getitem__(self, index): filename = self.files[index] # For actual training with full dataset, uncomment below @@ -88,21 +91,21 @@ def __getitem__(self,index): # For testing with dummy data split_1 = filename.split('_')[1] l1 = int(float(split_1.split('.')[0])) - if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 + if l1 == 1: # Complement operator ~ gave negative labels eg: for label 0 o/p was 1 l2 = 0 else: l2 = 1 - label = torch.tensor([l1,l2]) + label = torch.tensor([l1, l2]) # For data prepared using dataprep script, uncomment below line # img = np.load(os.path.join(self.imgpath,filename)) - img = np.load(os.path.join(self.imgpath,'img',filename)) + img = np.load(os.path.join(self.imgpath, 'img', filename)) if self.is_transform: - img= self.transform(img) + img = self.transform(img) return img, label - def transform(self,img): + def transform(self, img): img = torch.Tensor(img).unsqueeze(0) img = img.type(torch.FloatTensor) return img diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py index fb02a24d26e..48559469657 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/create_folds.py @@ -1,13 +1,12 @@ import json import os from collections import defaultdict - import numpy as np from natsort import natsorted from tqdm import tqdm as tq -def positive_negative_classifier(data_path,save_path): +def positive_negative_classifier(data_path, save_path): """Classifies slices as positive and negative slices. 
If any non-zero value is present in the mask/GT of @@ -28,14 +27,14 @@ def positive_negative_classifier(data_path,save_path): mask_path = data_path+'/mask/' - file_list=natsorted(os.listdir(mask_path)) - positive_list,negative_list=[],[] + file_list = natsorted(os.listdir(mask_path)) + positive_list, negative_list = [], [] for file in tq(file_list): try: mask = np.load(mask_path+file) - if (np.any(mask)): + if np.any(mask): positive_list.append(file) else: negative_list.append(file) @@ -44,15 +43,13 @@ def positive_negative_classifier(data_path,save_path): print('Skipped %s ,Unable to locate corresponding mask') continue - with open(save_path+'positive_slices.json', 'w') as f: json.dump(positive_list, f) with open(save_path+'negative_slices.json', 'w') as g: json.dump(negative_list, g) - -def subset_classifier(dataset_path,save_path): +def subset_classifier(dataset_path, save_path): """ Classifies the slices according the subset of origin Parameters @@ -66,12 +63,12 @@ def subset_classifier(dataset_path,save_path): A dictionary consisting of filename according to their subset. """ - dict_subset={} - dict_subset = defaultdict(lambda:[],dict_subset) + dict_subset = {} + dict_subset = defaultdict(lambda: [], dict_subset) for i in range(10): - #Since 10 subsets are provided in the dataset. + # Since 10 subsets are provided in the dataset. for file in tq(os.listdir(dataset_path+'subset'+str(i))): - file_name=os.path.basename(file) + file_name = os.path.basename(file) if file_name.endswith(".mhd"): dict_subset['subset'+str(i)].append(file_name) @@ -81,8 +78,7 @@ def subset_classifier(dataset_path,save_path): return dict_subset - -def assign_folds(dict_subset,save_path): +def assign_folds(dict_subset, save_path): """ Divides subsets into train,validation and testing sets of corresponding folds Parameters @@ -97,29 +93,36 @@ def assign_folds(dict_subset,save_path): None """ - dataset_list=[dict_subset['subset0'],dict_subset['subset1'], - dict_subset['subset2'],dict_subset['subset3'], - dict_subset['subset4'],dict_subset['subset5'], - dict_subset['subset6'],dict_subset['subset7'], - dict_subset['subset8'],dict_subset['subset9']] + dataset_list = [dict_subset['subset0'], dict_subset['subset1'], + dict_subset['subset2'], dict_subset['subset3'], + dict_subset['subset4'], dict_subset['subset5'], + dict_subset['subset6'], dict_subset['subset7'], + dict_subset['subset8'], dict_subset['subset9']] - for i in tq(range(10)): #10 Subsets in the dataset + for i in tq(range(10)): # 10 Subsets in the dataset - fold={} - fold = defaultdict(lambda:0,fold) - fold['train_set']=dataset_list[0-i]+dataset_list[1-i]+dataset_list[2-i]+dataset_list[3-i]+dataset_list[4-i]+dataset_list[5-i]+dataset_list[6-i]+dataset_list[7-i] + fold = {} + fold = defaultdict(lambda: 0, fold) + fold['train_set'] = dataset_list[0-i] +\ + dataset_list[1-i]+dataset_list[2-i] +\ + dataset_list[3-i]+dataset_list[4-i] +\ + dataset_list[5-i]+dataset_list[6-i] +\ + dataset_list[7-i] - fold['valid_set']=dataset_list[8-i] - fold['test_set']=dataset_list[9-i] + fold['valid_set'] = dataset_list[8-i] + fold['test_set'] = dataset_list[9-i] - - fold_name='fold'+str(i)+'_mhd.json' + fold_name = 'fold'+str(i)+'_mhd.json' with open(save_path+fold_name, 'w') as j: json.dump(fold, j) -def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test): - +def add_additional_slices( + series_uid_npylist, + fold_npy, + series_uid_train, + series_uid_val, + series_uid_test): """Adds additional negative slices to the prepared 
datalist Parameters @@ -135,6 +138,11 @@ def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_ui dict """ + count = {} + with open('positive_slices.json') as c: + pos_slices_json = json.load(c) + + pos_list = [x.split('.mhd')[0] for x in pos_slices_json] for i in (series_uid_train): c = series_uid_npylist.count(i) @@ -145,11 +153,10 @@ def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_ui for j in range(5): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['train_set'].append(file) - for j in range(count[i]-5,count[i]): + for j in range(count[i]-5, count[i]): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['train_set'].append(file) - for i in (series_uid_val): c = series_uid_npylist.count(i) count[i] = c @@ -159,11 +166,10 @@ def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_ui for j in range(5): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['valid_set'].append(file) - for j in range(count[i]-5,count[i]): + for j in range(count[i]-5, count[i]): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['valid_set'].append(file) - for i in (series_uid_test): c = series_uid_npylist.count(i) count[i] = c @@ -173,14 +179,14 @@ def add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_ui for j in range(5): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['test_set'].append(file) - for j in range(count[i]-5,count[i]): + for j in range(count[i]-5, count[i]): file = str(i)+'_slice'+str(j)+'.npy' fold_npy['test_set'].append(file) return fold_npy -def create_balanced_dataset(save_path,data_path,additional=False): +def create_balanced_dataset(save_path, data_path, additional=False): """Creates balanced dataset with equal positive and negative slices Parameters @@ -199,46 +205,38 @@ def create_balanced_dataset(save_path,data_path,additional=False): """ img_path = data_path+'/img/' - - with open(save_path+'positive_slices.json') as c: - pos_slices_json=json.load(c) - - - pos_list=[x.split('.mhd')[0] for x in pos_slices_json] - pos_list_uq=np.unique(np.array(pos_list)) - + with open('positive_slices.json') as c: + pos_slices_json = json.load(c) print('Sorting entire image set. Will take time.') - sorted_list=natsorted(os.listdir(img_path)) + sorted_list = natsorted(os.listdir(img_path)) print('Sorting completed') - for i in tq(range(10)): with open(save_path+'fold'+str(i)+'_mhd.json') as f: - j_data=json.load(f) - - pos_count=0 - neg_count=0 - count = {} - fold_npy={} - fold_npy = defaultdict(lambda:[],fold_npy) - series_uid_train=[x.split('.mhd')[0] for x in j_data['train_set']] - series_uid_val=[x.split('.mhd')[0] for x in j_data['valid_set']] - series_uid_test=[x.split('.mhd')[0] for x in j_data['test_set']] - fold_npy_name='fold'+str(i)+'_pos_neg_eq.json' + j_data = json.load(f) + + pos_count = 0 + neg_count = 0 + fold_npy = {} + fold_npy = defaultdict(lambda: [], fold_npy) + series_uid_train = [x.split('.mhd')[0] for x in j_data['train_set']] + series_uid_val = [x.split('.mhd')[0] for x in j_data['valid_set']] + series_uid_test = [x.split('.mhd')[0] for x in j_data['test_set']] + fold_npy_name = 'fold'+str(i)+'_pos_neg_eq.json' # series_uid_npylist=[x.split('_')[0] for x in npy_list] # series_uid_npylist_uq=np.unique(np.array(series_uid_npylist)) - for f,name in enumerate(sorted_list): + for f, name in enumerate(sorted_list): for q in series_uid_train: - if q in name : + if q in name: if name in pos_slices_json: - #pos_slices_json contains the list of all positive slices. 
+ # pos_slices_json contains the list of all positive slices. pos_count += 1 fold_npy['train_set'].append(name) - elif pos_count>neg_count: + elif pos_count > neg_count: # Here the slice will be negative since 'name' not in pos_slices neg_count += 1 fold_npy['train_set'].append(name) @@ -247,15 +245,14 @@ def create_balanced_dataset(save_path,data_path,additional=False): else: continue - for q in series_uid_val: - if q in name : + if q in name: if name in pos_slices_json: pos_count += 1 fold_npy['valid_set'].append(name) - elif pos_count>neg_count: + elif pos_count > neg_count: neg_count += 1 fold_npy['valid_set'].append(name) @@ -265,11 +262,11 @@ def create_balanced_dataset(save_path,data_path,additional=False): continue for q in series_uid_test: - if q in name : + if q in name: if name in pos_slices_json: pos_count += 1 fold_npy['test_set'].append(name) - elif pos_count>neg_count: + elif pos_count > neg_count: neg_count += 1 fold_npy['test_set'].append(name) else: @@ -277,10 +274,10 @@ def create_balanced_dataset(save_path,data_path,additional=False): else: continue with open(save_path+fold_npy_name, 'w') as z: - json.dump(fold_npy,z) + json.dump(fold_npy, z) - if additional == True: - fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) + # if additional: + # fold_npy = add_additional_slices(series_uid_npylist,fold_npy,series_uid_train,series_uid_val,series_uid_test) print('Balanced dataset generated and saved') diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py index 2795f1e1f1e..08ab2f976de 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_patches.py @@ -3,11 +3,11 @@ import cv2 import json from tqdm import tqdm as tq -import matplotlib.pyplot as plt import random from collections import defaultdict -def generate_patchlist(save_path,patchtype,fold_no=0): + +def generate_patchlist(save_path, patchtype, fold_no=0): """Generates positive slices in each fold Parameters @@ -28,7 +28,7 @@ def generate_patchlist(save_path,patchtype,fold_no=0): with open(save_path+'fold'+str(fold_no)+'_pos_neg_eq.json') as file: j_data = json.load(file) with open(save_path+'/'+patchtype+'_slices.json') as c: - pos_slices_json=json.load(c) + pos_slices_json = json.load(c) # print(pos_slices_json) @@ -51,18 +51,17 @@ def generate_patchlist(save_path,patchtype,fold_no=0): if i in pos_slices_json: test_seg_list.append(i) - patch_npy={} - patch_npy = defaultdict(lambda:[],patch_npy) + patch_npy = {} + patch_npy = defaultdict(lambda: [], patch_npy) patch_npy['train_set'] = train_seg_list patch_npy['valid_set'] = val_seg_list patch_npy['test_set'] = test_seg_list with open(save_path+'/'+patchtype+'_patchlist_f'+str(fold_no)+'.json', 'w') as z: - json.dump(patch_npy,z) - + json.dump(patch_npy, z) -def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,category='train_set'): +def generate_negative_patch(jsonpath, fold, data_path, lung_segpath, savepath, category='train_set'): """Gereates patches which doesn't have nodules Parameters @@ -97,83 +96,101 @@ def generate_negative_patch(jsonpath,fold,data_path,lung_segpath,savepath,catego size = 64 index = 0 for img_name in tq(nm_list): - #Loading the masks as uint8 as threshold function accepts 8bit image as parameter. 
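The loop that follows samples random 64x64 negative patches from inside the lung-mask bounding boxes. Note that the bounds checks in this hunk, for example x+size < 512 & y+size < 512, join the two comparisons with the bitwise &, which binds more tightly than < in Python, so they do not behave as two independent range tests. Below is a minimal sketch of the intended sampling using a logical "and"; the function name, the img_dim parameter and the simplified border fallback are illustrative only and not part of the repository.

import random
import cv2
import numpy as np

def sample_negative_patches(img, lung_mask, size=64, img_dim=512):
    # Binarize the lung mask and locate the lung contours.
    _, th_mask = cv2.threshold(lung_mask.astype(np.uint8), 0.5, 1, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    patches = []
    for cntr in sorted(contours, key=cv2.contourArea)[1:]:
        xr, yr, wr, hr = cv2.boundingRect(cntr)
        # Pick a random top-left corner inside the lung bounding box.
        x = random.randrange(xr, max(xr + 1, xr + wr - size // 2))
        y = random.randrange(yr, max(yr + 1, yr + hr - size // 2))
        # Intended in-bounds test: logical 'and' rather than bitwise '&'.
        if x + size < img_dim and y + size < img_dim:
            patches.append(img[y:y + size, x:x + size].copy())
        else:
            # Anchor the patch at the nearest valid corner when the box touches the border.
            patches.append(img[max(0, y - size):max(size, y), max(0, x - size):max(size, x)].copy())
    return patches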
- img = np.load(os.path.join(img_dir, img_name)).astype(np.float32)#*255 - mask = np.load(os.path.join(mask_dir, img_name)).astype(np.uint8)#*255 + # Loading the masks as uint8 as threshold function accepts 8bit image as parameter. + img = np.load(os.path.join(img_dir, img_name) + ).astype(np.float32) # *255 + mask = np.load(os.path.join(mask_dir, img_name) + ).astype(np.uint8) # *255 if np.any(mask): - #Convert grayscale image to binary - _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value - contours, hierarchy = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + # Convert grayscale image to binary + # parameters are ip_img,threshold,max_value + _, th_mask = cv2.threshold(mask, 0.5, 1, 0, cv2.THRESH_BINARY) + contours, _ = cv2.findContours( + th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=lambda x: cv2.contourArea(x)) - #In certain cases there could be more than 2 contour, hence taking the largest 2 which will be lung + # In certain cases there could be more than 2 contour, hence taking the largest 2 which will be lung contours = contours[1:] - for cntr in contours: patch_count = 2 for _ in range(patch_count): - xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width + # Gives X,Y cordinate of BBox origin,height and width + xr, yr, wr, hr = cv2.boundingRect(cntr) # xc,yc = xr+wr/2,yr+hr/2 try: - x, y = random.randrange(xr, xr+wr-size/2),random.randrange(yr, yr+hr-size/2) + x, y = random.randrange( + xr, xr+wr-size/2), random.randrange(yr, yr+hr-size/2) except: prob = random.randrange(0, 1) - if prob>0.5: - x, y = random.randrange(xr, xr+wr/2),random.randrange(yr, yr+hr/2) + if prob > 0.5: + x, y = random.randrange( + xr, xr+wr/2), random.randrange(yr, yr+hr/2) else: - x, y = random.randrange(int(xr+wr/2),xr+wr),random.randrange(int(yr+hr/2),yr+hr) + x, y = random.randrange( + int(xr+wr/2), xr+wr), random.randrange(int(yr+hr/2), yr+hr) - if x+size<512 & y+size<512: - patch_img = img[y: y+size, x: x+size].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) + if x+size < 512 & y+size < 512: + patch_img = img[y: y+size, x: x + + size].copy().astype(np.float16) + patch_mask = np.zeros((size, size)).astype(np.float16) else: - if x-size<=0 & y-size<=0: - patch_img = img[0: size, 0: size].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) - - elif x-size<=0 & y-size>0: - patch_img = img[y-size: y, 0: size].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) - - elif x-size>0 & y-size<=0: - patch_img = img[0: size, x-size: x].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) + if x-size <= 0 & y-size <= 0: + patch_img = img[0: size, 0: size].copy().astype( + np.float16) + patch_mask = np.zeros( + (size, size)).astype(np.float16) + + elif x-size <= 0 & y-size > 0: + patch_img = img[y-size: y, + 0: size].copy().astype(np.float16) + patch_mask = np.zeros( + (size, size)).astype(np.float16) + + elif x-size > 0 & y-size <= 0: + patch_img = img[0: size, x - + size: x].copy().astype(np.float16) + patch_mask = np.zeros( + (size, size)).astype(np.float16) else: - patch_img = img[y-size: y, x-size: x].copy().astype(np.float16) - patch_mask = np.zeros((size,size)).astype(np.float16) + patch_img = img[y-size: y, x - + size: x].copy().astype(np.float16) + patch_mask = np.zeros( + (size, size)).astype(np.float16) - - - if np.shape(patch_img) != (64,64): - 
print('shape',np.shape(patch_img)) - print('cordinate of patch',x,x+size,y,y+size) - print('cordinate of BBox',xr,yr,wr,hr) + if np.shape(patch_img) != (64, 64): + print('shape', np.shape(patch_img)) + print('cordinate of patch', x, x+size, y, y+size) + print('cordinate of BBox', xr, yr, wr, hr) index += 1 img_savepath = savepath+'/patches/'+'/img/' mask_savepath = savepath+'/patches/'+'/mask/' if not os.path.isdir(img_savepath): os.makedirs(savepath+'/patches/'+'/img/') - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img) else: - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img) if not os.path.isdir(mask_savepath): os.makedirs(savepath+'/patches/'+'/mask/') - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask) else: - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask) -def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set'): +def generate_positive_patch(jsonpath, fold, data_path, savepath, category='train_set'): """Generate patches with nodules Parameters @@ -208,56 +225,70 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set size = 64 index = 0 for img_name in tq(nm_list): - #Loading the masks as uint8 as threshold function accepts 8bit image as parameter. + # Loading the masks as uint8 as threshold function accepts 8bit image as parameter. img = np.load(os.path.join(img_dir, img_name)).astype(np.float16) mask = np.load(os.path.join(mask_dir, img_name))/255 mask = mask.astype(np.uint8) if np.any(mask): - #Convert grayscale image to binary - _, th_mask = cv2.threshold(mask, 0.5, 1, 0,cv2.THRESH_BINARY) #parameters are ip_img,threshold,max_value - contours, _ = cv2.findContours(th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + # Convert grayscale image to binary + # parameters are ip_img,threshold,max_value + _, th_mask = cv2.threshold(mask, 0.5, 1, 0, cv2.THRESH_BINARY) + contours, _ = cv2.findContours( + th_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours = sorted(contours, key=lambda x: cv2.contourArea(x)) for cntr in contours: patch_count = 4 - xr,yr,wr,hr = cv2.boundingRect(cntr) #Gives X,Y cordinate of BBox origin,height and width - xc,yc = int(xr+wr/2),int(yr+hr/2) - - if int(yc-size/2) <0 or int(xc-size/2)<0: - if int(yc-size/2) <0 and int(xc-size/2)<0: - patch_img1 = img[0:size , 0:size].copy().astype(np.float16) - patch_mask1 = mask[0:size , 0:size].copy().astype(np.float16) - - elif int(yc-size/2) >0 and int(xc-size/2)<0: - patch_img1 = img[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) - patch_mask1 = mask[int(yc-size/2):int(yc+size/2) , 0:size].copy().astype(np.float16) - - elif int(yc-size/2) <0 and int(xc-size/2)>0: - patch_img1 = img[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - patch_mask1 = mask[0:size ,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - - - elif int(yc+size/2)>512 or int(xc+size/2)>512: - if int(yc+size/2)>512 and int(xc+size/2)>512: + # Gives X,Y cordinate of BBox origin,height and width + xr, yr, wr, hr = cv2.boundingRect(cntr) + xc, yc = int(xr+wr/2), int(yr+hr/2) + + if int(yc-size/2) < 0 or int(xc-size/2) < 0: + if int(yc-size/2) < 
0 and int(xc-size/2) < 0: + patch_img1 = img[0:size, 0:size].copy().astype( + np.float16) + patch_mask1 = mask[0:size, + 0:size].copy().astype(np.float16) + + elif int(yc-size/2) > 0 and int(xc-size/2) < 0: + patch_img1 = img[int( + yc-size/2):int(yc+size/2), 0:size].copy().astype(np.float16) + patch_mask1 = mask[int( + yc-size/2):int(yc+size/2), 0:size].copy().astype(np.float16) + + elif int(yc-size/2) < 0 and int(xc-size/2) > 0: + patch_img1 = img[0:size, int( + xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[0:size, int( + xc-size/2):int(xc+size/2)].copy().astype(np.float16) + + elif int(yc+size/2) > 512 or int(xc+size/2) > 512: + if int(yc+size/2) > 512 and int(xc+size/2) > 512: m = yc+size - 512 n = xc + size - 512 - patch_img1 = img[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) - patch_mask1 = mask[int(yc-m):512,int(xc-n):512].copy().astype(np.float16) + patch_img1 = img[int( + yc-m):512, int(xc-n):512].copy().astype(np.float16) + patch_mask1 = mask[int( + yc-m):512, int(xc-n):512].copy().astype(np.float16) - elif int(yc+size/2)>512 and int(xc+size/2)<512: + elif int(yc+size/2) > 512 and int(xc+size/2) < 512: m = yc+size - 512 - patch_img1 = img[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - patch_mask1 = mask[int(yc-m):512,int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_img1 = img[int( + yc-m):512, int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) + patch_mask1 = mask[int( + yc-m):512, int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - elif int(yc+size/2)<512 and int(xc+size/2)>512: + elif int(yc+size/2) < 512 and int(xc+size/2) > 512: n = xc+size - 512 - patch_img1 = img[int(yc-size/2):int(yc+size/2),int(xc-n):512].copy().astype(np.float16) - patch_mask1 = mask[int(yc-size/2):int(yc+size/2),int(xc-n):512].copy().astype(np.float16) + patch_img1 = img[int( + yc-size/2):int(yc+size/2), int(xc-n):512].copy().astype(np.float16) + patch_mask1 = mask[int( + yc-size/2):int(yc+size/2), int(xc-n):512].copy().astype(np.float16) - elif (int(yc-size/2)>=0 and int(yc+size/2)<=512) : - if(int(xc-size/2)>=0 and int(xc+size/2)<=512): + elif (int(yc-size/2) >= 0 and int(yc+size/2) <= 512): + if (int(xc-size/2) >= 0 and int(xc+size/2) <= 512): patch_img1 = img[ int(yc-size/2):int(yc+size/2), int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) @@ -265,127 +296,165 @@ def generate_positive_patch(jsonpath,fold,data_path,savepath,category='train_set int(yc-size/2):int(yc+size/2), int(xc-size/2):int(xc+size/2)].copy().astype(np.float16) - if np.shape(patch_img1) != (64,64): - print('shape',np.shape(patch_img1)) - print('cordinate of patch',x,x+size,y,y+size) - print('cordinate of BBox',xr,yr,wr,hr) + if np.shape(patch_img1) != (64, 64): + print('shape', np.shape(patch_img1)) + print('cordinate of BBox', xr, yr, wr, hr) img_savepath = savepath+'/patches/'+category+'/img/' mask_savepath = savepath+'/patches/'+category+'/mask/' if not os.path.isdir(img_savepath): os.makedirs(savepath+'/patches/'+category+'/img/') - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img1) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img1) else: - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img1) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img1) if not os.path.isdir(mask_savepath): os.makedirs(savepath+'/patches/'+category+'/mask/') - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask1) + 
np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask1) else: - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask1) + np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask1) index += 1 for i in range(patch_count): - xc,yc = xr,yr - xc,yc = xr+wr,yr+hr + xc, yc = xr, yr + xc, yc = xr+wr, yr+hr if i == 0: - if xc+size<512 and yc+size<512: - patch_img = img[yc:yc+size,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[yc:yc+size,xc:xc+size].copy().astype(np.float16) + if xc+size < 512 and yc+size < 512: + patch_img = img[yc:yc+size, xc:xc + + size].copy().astype(np.float16) + patch_mask = mask[yc:yc+size, xc:xc + + size].copy().astype(np.float16) - elif xc+size>512 and yc+size<512: + elif xc+size > 512 and yc+size < 512: m = xc+size-512 - patch_img = img[yc:yc+size,xc-m:xc+size-m].copy().astype(np.float16) - patch_mask = mask[yc:yc+size,xc-m:xc+size-m].copy().astype(np.float16) + patch_img = img[yc:yc+size, xc-m:xc + + size-m].copy().astype(np.float16) + patch_mask = mask[yc:yc+size, xc - + m:xc+size-m].copy().astype(np.float16) - elif xc+size<512 and yc+size>512: + elif xc+size < 512 and yc+size > 512: n = yc+size-512 - patch_img = img[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,xc:xc+size].copy().astype(np.float16) + patch_img = img[yc-n:yc+size-n, + xc:xc+size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n, + xc:xc+size].copy().astype(np.float16) else: m = xc+size-512 n = yc+size-512 - patch_img = img[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,xc-m:xc+size-m].copy().astype(np.float16) - elif i ==1: - - if xc-size>0 and yc+size<512: - patch_img = img[yc:yc+size,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[yc:yc+size,xc-size:xc].copy().astype(np.float16) - - elif xc-size<0 and yc+size<512: - - patch_img = img[yc:yc+size,0:size].copy().astype(np.float16) - patch_mask = mask[yc:yc+size,0:size].copy().astype(np.float16) - - elif xc-size>0 and yc+size>512: + patch_img = img[yc-n:yc+size-n, xc - + m:xc+size-m].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n, xc - + m:xc+size-m].copy().astype(np.float16) + elif i == 1: + + if xc-size > 0 and yc+size < 512: + patch_img = img[yc:yc+size, xc - + size:xc].copy().astype(np.float16) + patch_mask = mask[yc:yc+size, xc - + size:xc].copy().astype(np.float16) + + elif xc-size < 0 and yc+size < 512: + + patch_img = img[yc:yc+size, + 0:size].copy().astype(np.float16) + patch_mask = mask[yc:yc+size, + 0:size].copy().astype(np.float16) + + elif xc-size > 0 and yc+size > 512: n = yc+size-512 - patch_img = img[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,xc-size:xc].copy().astype(np.float16) + patch_img = img[yc-n:yc+size-n, xc - + size:xc].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n, + xc-size:xc].copy().astype(np.float16) else: n = yc+size-512 - patch_img = img[yc-n:yc+size-n,0:size].copy().astype(np.float16) - patch_mask = mask[yc-n:yc+size-n,0:size].copy().astype(np.float16) - elif i ==2: + patch_img = img[yc-n:yc+size-n, + 0:size].copy().astype(np.float16) + patch_mask = mask[yc-n:yc+size-n, + 0:size].copy().astype(np.float16) + elif i == 2: - if xc+size<512 and yc-size>0: - patch_img = img[yc-size:yc,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc:xc+size].copy().astype(np.float16) + if xc+size < 512 and yc-size > 0: + patch_img = img[yc-size:yc, xc:xc + + 
size].copy().astype(np.float16) + patch_mask = mask[yc-size:yc, xc:xc + + size].copy().astype(np.float16) - elif xc+size>512 and yc-size>0: + elif xc+size > 512 and yc-size > 0: m = xc+size-512 - patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + patch_img = img[yc-size:yc, xc-m:xc + + size-m].copy().astype(np.float16) + patch_mask = mask[yc-size:yc, xc - + m:xc+size-m].copy().astype(np.float16) - elif xc+size<512 and yc-size<0: - patch_img = img[0:size,xc:xc+size].copy().astype(np.float16) - patch_mask = mask[0:size,xc:xc+size].copy().astype(np.float16) + elif xc+size < 512 and yc-size < 0: + patch_img = img[0:size, xc:xc + + size].copy().astype(np.float16) + patch_mask = mask[0:size, xc:xc + + size].copy().astype(np.float16) else: m = xc+size-512 - patch_img = img[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc-m:xc+size-m].copy().astype(np.float16) + patch_img = img[yc-size:yc, xc-m:xc + + size-m].copy().astype(np.float16) + patch_mask = mask[yc-size:yc, xc - + m:xc+size-m].copy().astype(np.float16) - elif i==3: + elif i == 3: - if xc-size>0 and yc-size>0: - patch_img = img[yc-size:yc,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,xc-size:xc].copy().astype(np.float16) + if xc-size > 0 and yc-size > 0: + patch_img = img[yc-size:yc, xc - + size:xc].copy().astype(np.float16) + patch_mask = mask[yc-size:yc, xc - + size:xc].copy().astype(np.float16) - elif xc-size<0 and yc-size>0: + elif xc-size < 0 and yc-size > 0: m = xc+size-512 - patch_img = img[yc-size:yc,0:size].copy().astype(np.float16) - patch_mask = mask[yc-size:yc,0:size].copy().astype(np.float16) + patch_img = img[yc-size:yc, + 0:size].copy().astype(np.float16) + patch_mask = mask[yc-size:yc, + 0:size].copy().astype(np.float16) - elif xc-size>0 and yc-size<0: - patch_img = img[0:size,xc-size:xc].copy().astype(np.float16) - patch_mask = mask[0:size,xc-size:xc].copy().astype(np.float16) + elif xc-size > 0 and yc-size < 0: + patch_img = img[0:size, xc - + size:xc].copy().astype(np.float16) + patch_mask = mask[0:size, xc - + size:xc].copy().astype(np.float16) else: - patch_img = img[0:size,0:size].copy().astype(np.float16) - patch_mask = mask[0:size,0:size].copy().astype(np.float16) - + patch_img = img[0:size, 0:size].copy().astype( + np.float16) + patch_mask = mask[0:size, + 0:size].copy().astype(np.float16) - if np.shape(patch_img) != (64,64): - print('shape',np.shape(patch_img)) + if np.shape(patch_img) != (64, 64): + print('shape', np.shape(patch_img)) img_savepath = savepath+'/patches/'+category+'/img/' mask_savepath = savepath+'/patches/'+category+'/mask/' if not os.path.isdir(img_savepath): os.makedirs(savepath+'/patches/'+category+'/img/') - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img) else: - np.save(img_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_img) + np.save(img_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_img) if not os.path.isdir(mask_savepath): os.makedirs(savepath+'/patches/'+category+'/mask/') - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask) else: - np.save(mask_savepath+'patch_'+str(fold)+'_'+str(index)+'.npy',patch_mask) + np.save(mask_savepath+'patch_'+str(fold) + + '_'+str(index)+'.npy', patch_mask) index += 1 diff --git 
a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py index 325dc3dc6d8..6d954b2a464 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/data_prep/generate_slices.py @@ -7,7 +7,7 @@ from tqdm import tqdm as tq -def make_mask(height,width,slice_list,*args, **kwargs): +def make_mask(height, width, slice_list, *args, **kwargs): """Creates masks from the annotations given. Parameters @@ -27,24 +27,25 @@ def make_mask(height,width,slice_list,*args, **kwargs): """ - mask=np.zeros((height,width)) - n=kwargs.get('n', None) - point_dictx=kwargs.get('ii', None) - point_dicty=kwargs.get('jj', None) - + mask = np.zeros((height, width)) + n = kwargs.get('n', None) + point_dictx = kwargs.get('ii', None) + point_dicty = kwargs.get('jj', None) if n in slice_list: - temp_listx=point_dictx[n] - temp_listy=point_dicty[n] - plot_listx= [sum(x)/len(point_dictx[n]) for x in zip(*temp_listx)] - plot_listy= [sum(y)/len(point_dicty[n]) for y in zip(*temp_listy)] - merged_list =np.array([[plot_listy[i],plot_listx[i]] for i in range(0, len(plot_listx))]) + temp_listx = point_dictx[n] + temp_listy = point_dicty[n] + plot_listx = [sum(x)/len(point_dictx[n]) for x in zip(*temp_listx)] + plot_listy = [sum(y)/len(point_dicty[n]) for y in zip(*temp_listy)] + merged_list = np.array([[plot_listy[i], plot_listx[i]] + for i in range(0, len(plot_listx))]) - cv2.fillPoly(mask,pts=np.int32([merged_list]),color=(255,255,255)) + cv2.fillPoly(mask, pts=np.int32([merged_list]), color=(255, 255, 255)) return mask -def extract_slices(dataset_path,save_path,masktype='nodule'): + +def extract_slices(dataset_path, save_path, masktype='nodule'): """Extracts induvidual slices from the CT volumes given in the dataset, clips the max-min values and stores them as numpy arrays. 
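The next hunk walks every .mhd volume, pairs it with its pylidc annotations, and writes one .npy image/mask pair per slice after windowing the intensities. A compact sketch of that per-slice preprocessing, assuming the same [-1000, 1000] clipping window and min-max normalization used below, is:

import numpy as np
import SimpleITK as sitk

def read_ct_volume(mhd_path):
    # Load an .mhd/.raw volume as a (num_slice, height, width) array.
    itk_img = sitk.ReadImage(mhd_path)
    return sitk.GetArrayFromImage(itk_img)

def preprocess_slice(ct_slice):
    # Clip to the [-1000, 1000] window, then min-max normalize to [0, 1].
    image = np.clip(ct_slice.astype(np.float32), -1000, 1000)
    image = image - image.min()
    if image.max() > 0:
        image = image / image.max()
    return image

The helper names read_ct_volume and preprocess_slice are illustrative only; preprocess_slice(read_ct_volume(path)[n]) corresponds to the array that extract_slices stores under img/.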
@@ -64,64 +65,68 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): None """ - - file_list=[] + file_list = [] for tr in tq(range(10)): - subset_path=dataset_path+"/subset"+str(tr)+"/" + subset_path = dataset_path+"/subset"+str(tr)+"/" for file in os.listdir(subset_path): if file.endswith(".mhd"): file_list.append(os.path.join(subset_path, file)) - for file in tq(file_list): - file_name=os.path.basename(file) - series_instance_uid=os.path.splitext(file_name)[0] - img_file=file + file_name = os.path.basename(file) + series_instance_uid = os.path.splitext(file_name)[0] + img_file = file itk_img = sitk.ReadImage(img_file) img_array = sitk.GetArrayFromImage(itk_img) num_slice, height, width = img_array.shape - #Has the image data + # Has the image data - scan = pl.query(pl.Scan).filter(pl.Scan.series_instance_uid== series_instance_uid).first() + scan = pl.query(pl.Scan).filter( + pl.Scan.series_instance_uid == series_instance_uid).first() - #Maped the image data with annotation using series id + # Maped the image data with annotation using series id - nods = scan.cluster_annotations() #Function used to determine which annotation belongs to which nodule + # Function used to determine which annotation belongs to which nodule + nods = scan.cluster_annotations() - nodule_dict={} #Dict to store number of contour markings for that nodule - slice_list=[] # List to store the slices which has nodules marked - points_dictx={} # These dicts are to store the points to be plotted (key=slice_index, ) - points_dicty={} - points_dictx = defaultdict(lambda:[],points_dictx) - points_dicty = defaultdict(lambda:[],points_dicty) + nodule_dict = {} # Dict to store number of contour markings for that nodule + slice_list = [] # List to store the slices which has nodules marked + # These dicts are to store the points to be plotted (key=slice_index, ) + points_dictx = {} + points_dicty = {} + points_dictx = defaultdict(lambda: [], points_dictx) + points_dicty = defaultdict(lambda: [], points_dicty) for nod in nods: - nodule_dict[i]=len(nod) #Stores a dict which has count of annotation for each nodule - - for key,value in nodule_dict.items(): - #if value>=3 : #Taking annotations provided by 3 or more annotator - for i in range(value): - ann=nods[key][i] #-1 to keep index correct - con=ann.contours[0] #All coutours for specific nodule collected - - k = con.image_k_position # Returns the slice number/index which has the nodule - slice_list.append(k) - ii,jj = ann.contours[0].to_matrix(include_k=False).T - points_dictx[k].append(ii) - points_dicty[k].append(jj) + # Stores a dict which has count of annotation for each nodule + nodule_dict[i] = len(nod) + + for key, value in nodule_dict.items(): + # if value>=3 : #Taking annotations provided by 3 or more annotator + for i in range(value): + ann = nods[key][i] # -1 to keep index correct + # All coutours for specific nodule collected + con = ann.contours[0] + + k = con.image_k_position # Returns the slice number/index which has the nodule + slice_list.append(k) + ii, jj = ann.contours[0].to_matrix(include_k=False).T + points_dictx[k].append(ii) + points_dicty[k].append(jj) # !!Note!! 
The pylidc package gives cordinates for single slices, If more than one annotaions are give then # Sum(x)/total no: of annotation for all provided pixel is given as input - for n in range(1,num_slice): + for n in range(1, num_slice): - image=(img_array[n].copy()).astype(np.float32) + image = (img_array[n].copy()).astype(np.float32) im_max = np.max(image) if im_max != 0: - image[image>1000]=1000 - image[image<-1000]=-1000 - mask=make_mask(height,width,slice_list,ii=points_dictx,jj=points_dicty,n=n) + image[image > 1000] = 1000 + image[image < -1000] = -1000 + mask = make_mask(height, width, slice_list, + ii=points_dictx, jj=points_dicty, n=n) mask = np.array(mask, dtype=np.float32) image = image - image.min() image = image/image.max() @@ -131,17 +136,22 @@ def extract_slices(dataset_path,save_path,masktype='nodule'): if not os.path.isdir(save_path+'/img'): os.makedirs(save_path+'/img') - np.save(save_path+'/img/'+series_instance_uid+'_slice'+str(n)+'.npy',image) + np.save(save_path+'/img/'+series_instance_uid + + '_slice'+str(n)+'.npy', image) else: - np.save(save_path+'/img/'+series_instance_uid+'_slice'+str(n)+'.npy',image) + np.save(save_path+'/img/'+series_instance_uid + + '_slice'+str(n)+'.npy', image) if not os.path.isdir(save_path+'/mask'): os.makedirs(save_path+'/mask') - np.save(save_path+'/mask/'+series_instance_uid+'_slice'+str(n)+'.npy',mask) + np.save(save_path+'/mask/'+series_instance_uid + + '_slice'+str(n)+'.npy', mask) else: - np.save(save_path+'/mask/'+series_instance_uid+'_slice'+str(n)+'.npy',mask) + np.save(save_path+'/mask/'+series_instance_uid + + '_slice'+str(n)+'.npy', mask) + -def generate_lungseg(dataset_path,save_path): +def generate_lungseg(dataset_path, save_path): file_list = [] for file in os.listdir(dataset_path): @@ -149,14 +159,16 @@ def generate_lungseg(dataset_path,save_path): file_list.append(os.path.join(dataset_path, file)) for img_file in tq(file_list): - file_name=os.path.basename(img_file) - series_instance_uid=os.path.splitext(file_name)[0] + file_name = os.path.basename(img_file) + series_instance_uid = os.path.splitext(file_name)[0] itk_img = sitk.ReadImage(img_file) img_array = sitk.GetArrayFromImage(itk_img) num_slice, _, _ = img_array.shape - for n in range(1,num_slice): + for n in range(1, num_slice): if not os.path.isdir(save_path+'/lungseg'): os.makedirs(save_path+'/lungseg') - np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) + np.save(save_path+'/lungseg/'+series_instance_uid + + '_slice'+str(n)+'.npy', img_array[n]) else: - np.save(save_path+'/lungseg/'+series_instance_uid+'_slice'+str(n)+'.npy',img_array[n]) + np.save(save_path+'/lungseg/'+series_instance_uid + + '_slice'+str(n)+'.npy', img_array[n]) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py index 213c80e733c..4a4c3517d91 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/downloader.py @@ -28,4 +28,4 @@ def download_data(): if __name__ == '__main__': download_data() - download_checkpoint() \ No newline at end of file + download_checkpoint() diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py index 987cab7e846..bdbc54a5107 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py +++ 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage1.py @@ -12,29 +12,14 @@ from .utils import dice_coefficient, load_inference_model, load_checkpoint plt.switch_backend('agg') + + def to_numpy(tensor): return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() def infer_lungseg(config, run_type='pytorch'): - """ Inference script for lung segmentation - - Parameters - ---------- - fold_no: int - Fold number to which action is to be performed - save_path: str - Folder location to save the results - network: str - Network name - jsonpath: - Folder location where file is to be stored - - Returns - ------- - None - - """ + fold_no = config["fold_no"] save_path = config["save_path"] network = config["network"] @@ -43,15 +28,22 @@ def infer_lungseg(config, run_type='pytorch'): datapath = config["data_path"] lung_segpath = config["lung_segpath"] fold = 'fold'+str(fold_no) - save_path = os.path.join(save_path,network,fold) + save_path = os.path.join(save_path, network, fold) if not os.path.isdir(save_path): os.makedirs(save_path) with open(jsonpath) as f: json_file = json.load(f) - testDset = LungDataLoader(datapath=datapath,lung_path = lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) - testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) + testDset = LungDataLoader( + datapath=datapath, + lung_path=lung_segpath, + is_transform=True, + json_file=json_file, + split="valid_set", + img_size=512) + testDataLoader = data.DataLoader( + testDset, batch_size=1, shuffle=False, num_workers=4, pin_memory=True, drop_last=True) testBatches = 0 testDice_lungs = 0 @@ -60,19 +52,19 @@ def infer_lungseg(config, run_type='pytorch'): if run_type == 'pytorch': if network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) + net = SUMNet(in_ch=1, out_ch=2) elif network == 'unet': - net = U_Net(img_ch=1,output_ch=2) + net = U_Net(img_ch=1, output_ch=2) else: - net = R2U_Net(img_ch=1,output_ch=2) + net = R2U_Net(img_ch=1, output_ch=2) if use_gpu: net = net.cuda() - net = load_checkpoint(net,save_path+network+'_best_lungs.pt') + net = load_checkpoint(net, save_path+network+'_best_lungs.pt') elif run_type == 'onnx': - net = load_inference_model(config,run_type='onnx') + net = load_inference_model(config, run_type='onnx') else: - net = load_inference_model(config,run_type='ir') + net = load_inference_model(config, run_type='ir') for data1 in tq(testDataLoader): inputs, labels = data1 @@ -82,40 +74,46 @@ def infer_lungseg(config, run_type='pytorch'): inputs = inputs.cuda() net_out = net(inputs) - net_out_sf = F.softmax(net_out.data,dim=1).detach().cpu() + net_out_sf = F.softmax(net_out.data, dim=1).detach().cpu() elif run_type == 'ir': net_out = net.infer(inputs={'input': inputs})['output'] net_out = torch.tensor(net_out) - net_out_sf = F.softmax(net_out.data,dim=0) + net_out_sf = F.softmax(net_out.data, dim=0) else: ort_inputs = {net.get_inputs()[0].name: to_numpy(inputs)} net_out = net.run(None, ort_inputs) net_out = np.array(net_out) net_out = to_tensor(net_out).squeeze(1).transpose(dim0=1, dim1=0) - net_out_sf = F.softmax(net_out.data,dim=1) + net_out_sf = F.softmax(net_out.data, dim=1) - test_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + test_dice = dice_coefficient(net_out_sf, torch.argmax(labels, dim=1)) pred_max = torch.argmax(net_out_sf, dim=1) preds = torch.zeros(pred_max.shape) preds[pred_max == 1] = 1 if not os.path.isdir(save_path+'seg_results/GT/'): 
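Around this point the two-channel network output is turned into a binary lung mask before the Dice score is computed and the GT, prediction and input arrays are saved. A minimal sketch of that post-processing, assuming a (batch, 2, H, W) logits tensor as produced by the segmentation networks used here and a hypothetical helper name, is:

import torch
import torch.nn.functional as F

def logits_to_lung_mask(net_out):
    # net_out: (batch, 2, H, W) logits, channel 1 being the lung class.
    probs = F.softmax(net_out, dim=1)
    pred_max = torch.argmax(probs, dim=1)
    preds = torch.zeros(pred_max.shape)
    preds[pred_max == 1] = 1
    return probs, preds

The softmax probabilities are what dice_coefficient consumes, while the binary preds tensor is what ends up under seg_results/pred/.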
os.makedirs(save_path+'seg_results/GT/') - np.save(save_path+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + np.save(save_path+'seg_results/GT/image' + + str(testBatches), labels[:, 1].cpu()) else: - np.save(save_path+'seg_results/GT/image'+str(testBatches),labels[:,1].cpu()) + np.save(save_path+'seg_results/GT/image' + + str(testBatches), labels[:, 1].cpu()) if not os.path.isdir(save_path+'seg_results/pred/'): os.makedirs(save_path+'seg_results/pred/') - np.save(save_path+'seg_results/pred/image'+str(testBatches),preds.cpu()) + np.save(save_path+'seg_results/pred/image' + + str(testBatches), preds.cpu()) else: - np.save(save_path+'seg_results/pred/image'+str(testBatches),preds.cpu()) + np.save(save_path+'seg_results/pred/image' + + str(testBatches), preds.cpu()) if not os.path.isdir(save_path+'seg_results/image/'): os.makedirs(save_path+'seg_results/image/') - np.save(save_path+'seg_results/image/image'+str(testBatches),inputs.cpu()) + np.save(save_path+'seg_results/image/image' + + str(testBatches), inputs.cpu()) else: - np.save(save_path+'seg_results/image/image'+str(testBatches),inputs.cpu()) + np.save(save_path+'seg_results/image/image' + + str(testBatches), inputs.cpu()) testDice_lungs += test_dice[0] dice_list.append(test_dice[0].cpu()) @@ -124,9 +122,9 @@ def infer_lungseg(config, run_type='pytorch'): # break dice = np.mean(dice_list) - print("Result:",fold,dice) + print("Result:", fold, dice) - #Plots distribution of min values per volume + # Plots distribution of min values per volume plt.figure() plt.title('Distribution of Dice values') plt.hist(dice_list) @@ -139,22 +137,7 @@ def infer_lungseg(config, run_type='pytorch'): return dice - def visualise_seg(loadpath): - """ - To visualise the segmentation performance(Qualitative results) - - Parameters - ---------- - - loadpath: str - Folder location from where the files are to be loaded - - Returns - ------- - None - - """ image_list = os.listdir(loadpath+'GT/') count = 0 @@ -163,26 +146,26 @@ def visualise_seg(loadpath): GT = np.load(loadpath+'GT/'+i) pred = np.load(loadpath+'pred/'+i) - plt.figure(figsize = [15,5]) + plt.figure(figsize=[15, 5]) plt.subplot(141) plt.axis('off') plt.title('Input Image') - plt.imshow(img[0][0],cmap = 'gray') + plt.imshow(img[0][0], cmap='gray') plt.subplot(142) plt.axis('off') plt.title('GT') - plt.imshow(GT[0],cmap = 'gray') + plt.imshow(GT[0], cmap='gray') plt.subplot(143) plt.axis('off') plt.title('Pred') - plt.imshow(pred[0],cmap = 'gray') + plt.imshow(pred[0], cmap='gray') plt.subplot(144) plt.title('GT - Pred') plt.axis('off') test = GT[0]-pred[0] - test[test>0] = 1 - test[test<=0] = 0 - plt.imshow(test,cmap = 'gray') + test[test > 0] = 1 + test[test <= 0] = 0 + plt.imshow(test, cmap='gray') count += 1 if not os.path.isdir(loadpath+'seg_results/op_images/'): diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py index 717d52476ef..f2a1b73891c 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/infer_stage2.py @@ -1,17 +1,15 @@ import torch -import torch.nn as nn -import torch.optim as optim -from torch.optim import lr_scheduler +from torch import nn from torch.utils import data from torch.autograd import Variable import numpy as np from tqdm import tqdm as tq import json -from sklearn.metrics import confusion_matrix from .data_loader import LungPatchDataLoader from .models import LeNet from 
.utils import load_inference_model + def lungpatch_classifier(config, run_type): imgpath = config["imgpath"] modelpath = config["modelpath"] @@ -20,8 +18,10 @@ def lungpatch_classifier(config, run_type): with open(jsonpath) as f: json_file = json.load(f) - testDset = LungPatchDataLoader(imgpath,json_file,is_transform=True,split="test_set") - testDataLoader = data.DataLoader(testDset,batch_size=1,shuffle=True,num_workers=4,pin_memory=True) + testDset = LungPatchDataLoader( + imgpath, json_file, is_transform=True, split="test_set") + testDataLoader = data.DataLoader( + testDset, batch_size=1, shuffle=True, num_workers=4, pin_memory=True) classification_model_loadPath = modelpath use_gpu = torch.cuda.is_available() @@ -29,16 +29,14 @@ def lungpatch_classifier(config, run_type): net = LeNet() if use_gpu: net = net.cuda() - net.load_state_dict(torch.load(classification_model_loadPath+'lenet_best.pt')) + net.load_state_dict(torch.load( + classification_model_loadPath+'lenet_best.pt')) elif run_type == 'onnx': net = load_inference_model(config, run_type='onnx') else: net = load_inference_model(config, run_type='ir') - - optimizer = optim.Adam(net.parameters(), lr = 1e-4, weight_decay = 1e-5) criterion = nn.BCEWithLogitsLoss() - scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=5, verbose=True) testRunningCorrects = 0 testRunningLoss = 0 testBatches = 0 @@ -57,12 +55,12 @@ def lungpatch_classifier(config, run_type): net_out = net.infer(inputs={'input': inputs})['output'] net_out = torch.tensor(net_out) else: - ort_inputs = {net.get_inputs()[0].name: to_numpy(inputs)} + ort_inputs = {net.get_inputs()[0].name: np.array(inputs)} net_out = net.run(None, ort_inputs) net_out = np.array(net_out) net_out = torch.tensor(net_out) - net_loss = criterion(net_out,label) + net_loss = criterion(net_out, label) preds = torch.zeros(net_out.shape).cuda() preds[net_out > 0.5] = 1 preds[net_out <= 0.5] = 0 @@ -70,7 +68,7 @@ def lungpatch_classifier(config, run_type): testRunningLoss += net_loss.item() testRunningCorrects += torch.sum(preds == label.data.float()) - for i,j in zip(preds.cpu().numpy(),label.cpu().numpy()): + for i, j in zip(preds.cpu().numpy(), label.cpu().numpy()): pred_arr.append(i) label_arr.append(j) @@ -81,7 +79,6 @@ def lungpatch_classifier(config, run_type): testepoch_loss = testRunningLoss/testBatches testepoch_acc = 100*(int(testRunningCorrects)/len(pred_arr)) - print(' Loss: {:.4f} | accuracy: {:.4f} '.format( - testepoch_loss,testepoch_acc)) + print(f'Loss: {testepoch_loss} | Accuracy: {testepoch_acc} ') return testepoch_acc diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py index 21c019c6b9e..951d411ae09 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/models.py @@ -308,7 +308,7 @@ def forward(self,x): class Discriminator(nn.Module): def __init__(self,in_ch, out_ch): - super(Discriminator, self).__init__() + super().__init__() self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(in_ch, 64, 3, 1, 0, bias=False), @@ -336,6 +336,6 @@ def __init__(self,in_ch, out_ch): nn.LeakyReLU(0.2, inplace=True), nn.Sigmoid() ) - def forward(self, input): - output = self.main(input) + def forward(self, x): + output = self.main(x) return output.view(-1, 2) #.squeeze(1) diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py index 769f70b5b3b..1e04d9cfc45 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage1.py @@ -1,5 +1,5 @@ import torch -import torch.nn as nn +from torch import nn from torch import optim from torch.utils import data import torch.nn.functional as F @@ -15,6 +15,7 @@ plt.switch_backend('agg') + def train_network(config): """Training function for SUMNet,UNet,R2Unet @@ -43,7 +44,6 @@ def train_network(config): None """ - fold_no = config["fold_no"] fold = 'fold'+str(fold_no) save_path = config["save_path"] @@ -62,35 +62,59 @@ def train_network(config): with open(json_path) as f: json_file = json.load(f) - trainDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="train_set",img_size=512) - valDset = LungDataLoader(datapath=datapath,lung_path=lung_segpath,is_transform=True,json_file=json_file,split="valid_set",img_size=512) - trainDataLoader = data.DataLoader(trainDset,batch_size=4,shuffle=True,num_workers=4,pin_memory=True,drop_last=True) - validDataLoader = data.DataLoader(valDset,batch_size=1,shuffle=False,num_workers=4,pin_memory=True,drop_last=True) + trainDset = LungDataLoader( + datapath=datapath, + lung_path=lung_segpath, + is_transform=True, + json_file=json_file, + split="train_set", + img_size=512) + valDset = LungDataLoader( + datapath=datapath, + lung_path=lung_segpath, + is_transform=True, + json_file=json_file, + split="valid_set", + img_size=512) + trainDataLoader = data.DataLoader( + trainDset, + batch_size=4, + shuffle=True, + num_workers=4, + pin_memory=True, + drop_last=True) + validDataLoader = data.DataLoader( + valDset, + batch_size=1, + shuffle=False, + num_workers=4, + pin_memory=True, + drop_last=True) if network == 'unet': - net = U_Net(img_ch=1,output_ch=2) + net = U_Net(img_ch=1, output_ch=2) if network == 'r2unet': - net = R2U_Net(img_ch=1,output_ch=2) + net = R2U_Net(img_ch=1, output_ch=2) if network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) + net = SUMNet(in_ch=1, out_ch=2) use_gpu = torch.cuda.is_available() if use_gpu: net = net.cuda() - optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + optimizer = optim.Adam(net.parameters(), lr=lrate, weight_decay=1e-5) if adv: - netD2 = Discriminator(in_ch=2,out_ch=2) + netD2 = Discriminator(in_ch=2, out_ch=2) if use_gpu: netD2 = netD2.cuda() - optimizerD2 = optim.Adam(netD2.parameters(), lr = 1e-4, weight_decay = 1e-5) + optimizerD2 = optim.Adam( + netD2.parameters(), lr=1e-4, weight_decay=1e-5) criterionD = nn.BCELoss() D2_losses = [] criterion = nn.BCEWithLogitsLoss() - epochs = epochs trainLoss = [] validLoss = [] trainDiceCoeff_lungs = [] @@ -99,7 +123,6 @@ def train_network(config): bestValidDice_lungs = 0.0 - for epoch in range(epochs): epochStart = time.time() trainRunningLoss = 0 @@ -109,7 +132,6 @@ def train_network(config): trainDice_lungs = 0 validDice_lungs = 0 - net.train(True) for data1 in tq(trainDataLoader): @@ -121,18 +143,19 @@ def train_network(config): net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out,dim=1) + net_out_sf = F.softmax(net_out, dim=1) if adv: optimizerD2.zero_grad() # Concatenate real (GT) and fake (segmented) samples along dim 1 - d_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + d_in = torch.cat((net_out[:, 1].unsqueeze( + 1), labels[:, 1].unsqueeze(1).float()), dim=1) # Shuffling aling 
dim 1: {real,fake} OR {fake,real} - d_in,shuffLabel = ch_shuffle(d_in) + d_in, shuffLabel = ch_shuffle(d_in) # D2 prediction - confr = netD2(Variable(d_in)).view(d_in.size(0),-1) + confr = netD2(Variable(d_in)).view(d_in.size(0), -1) # Compute loss - LD2 = criterionD(confr,shuffLabel.float().cuda()) + LD2 = criterionD(confr, shuffLabel.float().cuda()) # Compute gradients LD2.backward() # Backpropagate @@ -140,19 +163,25 @@ def train_network(config): # Appending loss for each batch into the list D2_losses.append(LD2.item()) optimizerD2.zero_grad() - d2_in = torch.cat((net_out[:,1].unsqueeze(1),labels[:,1].unsqueeze(1).float()),dim=1) + d2_in = torch.cat((net_out[:, 1].unsqueeze( + 1), labels[:, 1].unsqueeze(1).float()), dim=1) d2_in, d2_lb = ch_shuffle(d2_in) - conffs2 = netD2(d2_in).view(d2_in.size(0),-1) - LGadv2 = criterionD(conffs2,d2_lb.float().cuda()) # Aversarial loss 2 + conffs2 = netD2(d2_in).view(d2_in.size(0), -1) + # Aversarial loss 2 + LGadv2 = criterionD(conffs2, d2_lb.float().cuda()) - BCE_Loss = criterion(net_out[:,1],labels[:,1]) - net_loss = BCE_Loss + BCE_Loss = criterion(net_out[:, 1], labels[:, 1]) + if adv: + net_loss = BCE_Loss + LGadv2 + else: + net_loss = BCE_Loss optimizer.zero_grad() net_loss.backward() optimizer.step() trainRunningLoss += net_loss.item() - trainDice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + trainDice = dice_coefficient( + net_out_sf, torch.argmax(labels, dim=1)) trainDice_lungs += trainDice[0] trainBatches += 1 @@ -172,13 +201,13 @@ def train_network(config): labels = labels.cuda() net_out = net(Variable(inputs)) - net_out_sf = F.softmax(net_out.data,dim=1) - + net_out_sf = F.softmax(net_out.data, dim=1) - BCE_Loss = criterion(net_out[:,1],labels[:,1]) + BCE_Loss = criterion(net_out[:, 1], labels[:, 1]) net_loss = BCE_Loss - val_dice = dice_coefficient(net_out_sf,torch.argmax(labels,dim=1)) + val_dice = dice_coefficient( + net_out_sf, torch.argmax(labels, dim=1)) validDice_lungs += val_dice[0] validRunningLoss += net_loss.item() validBatches += 1 @@ -191,18 +220,21 @@ def train_network(config): if validDice_lungs.cpu() > bestValidDice_lungs: bestValidDice_lungs = validDice_lungs.cpu() torch.save(net.state_dict(), save_path+'/sumnet_best_lungs.pt') - + plot_graphs(train_values=trainLoss, valid_values=validLoss, - save_path=save_path, x_label='Epochs', y_label='Loss', - plot_title='Running Loss', save_name='LossPlot.png') + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Running Loss', save_name='LossPlot.png') epochEnd = time.time()-epochStart - print('Epoch: {:.0f}/{:.0f} | Train Loss: {:.5f} | Valid Loss: {:.5f}' - .format(epoch+1, epochs, trainRunningLoss/trainBatches, validRunningLoss/validBatches)) - print('Dice | Train | Lung {:.3f} | Valid | Lung {:.3f} | ' - .format(trainDice_lungs/trainBatches, validDice_lungs/validBatches)) + train_r_loss = trainRunningLoss/trainBatches + valid_r_loss = validRunningLoss/validBatches + train_r_dice = trainDice_lungs/trainBatches + valid_r_dice = validDice_lungs/validBatches + print( + f'Epoch: {epoch+1}/{epochs} | Train Loss:{train_r_loss} | Valid Loss: {valid_r_loss}') + print(f'Dice | Train{train_r_dice} | Valid {valid_r_dice}') - print('\nTime: {:.0f}m {:.0f}s'.format(epochEnd//60,epochEnd%60)) + print(f'\nTime: {epochEnd//60}m {epochEnd%60}s') print('Saving losses') @@ -215,15 +247,14 @@ def train_network(config): # break end = time.time()-start - print('Training completed in {:.0f}m {:.0f}s'.format(end//60,end%60)) + print(f'Training completed in {end//60}m 
{end%60}s') plot_graphs(train_values=trainLoss, valid_values=validLoss, - save_path=save_path, x_label='Epochs', y_label='Loss', - plot_title='Loss plot', save_name='LossPlotFinal.png') + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='LossPlotFinal.png') plot_graphs(train_values=trainDiceCoeff_lungs, valid_values=validDiceCoeff_lungs, - save_path=save_path, x_label='Epochs', y_label='Dice coefficient', - plot_title='Dice coefficient', save_name='Dice_Plot.png') + save_path=save_path, x_label='Epochs', y_label='Dice coefficient', + plot_title='Dice coefficient', save_name='Dice_Plot.png') return trainLoss - diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py index 901dda3d09f..4f9e777baf6 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py +++ b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/train_stage2.py @@ -14,39 +14,25 @@ def lungpatch_classifier(config): - """Trains network to classify patches based on the presence of nodule - - Parameters - ---------- - save_path: str - Folder location to save the plots and model - img_path: - Folder location where patch images are stored. - lrate: int,Default = 1e-4 - Learning rate - epochs: int, default = 35 - Total epochs - - Returns - ------- - - None - """ + save_path = config["savepath"] img_path = config["imgpath"] - lrate = config["lrate"] + lrate = config["lrate"] epochs = config["epochs"] json_path = config["jsonpath"] with open(json_path) as f: json_file = json.load(f) - trainDset = LungPatchDataLoader(imgpath=img_path,json_file=json_file,is_transform=True,split="train_set") - valDset = LungPatchDataLoader(imgpath=img_path,json_file=json_file,is_transform=True,split="valid_set") - trainDataLoader = data.DataLoader(trainDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) - validDataLoader = data.DataLoader(valDset,batch_size=16,shuffle=True,num_workers=4,pin_memory=True) + trainDset = LungPatchDataLoader( + imgpath=img_path, json_file=json_file, is_transform=True, split="train_set") + valDset = LungPatchDataLoader( + imgpath=img_path, json_file=json_file, is_transform=True, split="valid_set") + trainDataLoader = data.DataLoader( + trainDset, batch_size=16, shuffle=True, num_workers=4, pin_memory=True) + validDataLoader = data.DataLoader( + valDset, batch_size=16, shuffle=True, num_workers=4, pin_memory=True) - save_path = save_path if not os.path.isdir(save_path): os.makedirs(save_path) use_gpu = torch.cuda.is_available() @@ -56,11 +42,11 @@ def lungpatch_classifier(config): if use_gpu: net = net.cuda() - optimizer = optim.Adam(net.parameters(), lr = lrate, weight_decay = 1e-5) + optimizer = optim.Adam(net.parameters(), lr=lrate, weight_decay=1e-5) criterion = nn.BCEWithLogitsLoss() - scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1, patience=5, verbose=True) + scheduler = lr_scheduler.ReduceLROnPlateau( + optimizer, mode='max', factor=0.1, patience=5, verbose=True) - epochs = epochs trainLoss = [] validLoss = [] trainAcc = [] @@ -87,7 +73,7 @@ def lungpatch_classifier(config): net_out = net(Variable(inputs)) - net_loss = criterion(net_out,label.float()) + net_loss = criterion(net_out, label.float()) preds = torch.zeros(net_out.shape).cuda() preds[net_out > 0.5] = 1 preds[net_out <= 0.5] = 0 @@ -99,8 +85,8 @@ def lungpatch_classifier(config): optimizer.step() trainRunningLoss += net_loss.item() - for i in 
range(len(preds[:,0])): - if preds[:,0][i] == label[:,0][i].float(): + for i in range(len(preds[:, 0])): + if preds[:, 0][i] == label[:, 0][i].float(): trainRunningCorrects += 1 trainBatches += 1 @@ -112,7 +98,8 @@ def lungpatch_classifier(config): trainLoss.append(trainepoch_loss) trainAcc.append(trainepoch_acc) - print(f'Epoch: {epoch+1}/{epochs}, Train Loss:{trainepoch_loss}, Train acc:{trainepoch_acc}') + print( + f'Epoch: {epoch+1}/{epochs}, Train Loss:{trainepoch_loss}, Train acc:{trainepoch_acc}') with torch.no_grad(): for data1 in tq(validDataLoader): @@ -124,14 +111,14 @@ def lungpatch_classifier(config): net_out = net(Variable(inputs)) - net_loss = criterion(net_out,label) + net_loss = criterion(net_out, label) preds = torch.zeros(net_out.shape).cuda() preds[net_out > 0.5] = 1 preds[net_out <= 0.5] = 0 validRunningLoss += net_loss.item() - for i in range(len(preds[:,0])): - if preds[:,0][i] == label[:,0][i].float(): + for i in range(len(preds[:, 0])): + if preds[:, 0][i] == label[:, 0][i].float(): validRunningCorrects += 1 validBatches += 1 @@ -143,7 +130,8 @@ def lungpatch_classifier(config): validLoss.append(validepoch_loss) validAcc.append(validepoch_acc) - print(f'Epoch: {epoch} Loss: {validepoch_loss} | accuracy: {validepoch_acc}') + print( + f'Epoch: {epoch} Loss: {validepoch_loss} | accuracy: {validepoch_acc}') if validepoch_acc > bestValidAcc: bestValidAcc = validepoch_acc @@ -152,18 +140,19 @@ def lungpatch_classifier(config): scheduler.step(validepoch_loss) plot_graphs( - train_values=trainLoss, valid_values=validLoss, - save_path=save_path, x_label='Epochs', y_label='Loss', - plot_title='Loss plot', save_name='LossPlot.png') + train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='LossPlot.png') epochEnd = time.time()-epochStart - print(f'Epoch: {epoch+1}/{epochs} | Train Loss: {trainepoch_loss} | Valid Loss: {validepoch_loss}') - print('Accuracy | Train_acc {trainepoch_acc} | Valid_acc {validepoch_acc} |') + print( + f'Epoch: {epoch+1}/{epochs} | Train Loss: {trainepoch_loss} | Valid Loss: {validepoch_loss}') + print( + f'Accuracy | Train_acc {trainepoch_acc} | Valid_acc {validepoch_acc} |') print(f'Time: {epochEnd//60}m {epochEnd%60}s') - - print(f'Saving losses') + print('Saving losses') torch.save(trainLoss, save_path+'trainLoss.pt') torch.save(validLoss, save_path+'validLoss.pt') @@ -176,13 +165,13 @@ def lungpatch_classifier(config): end = time.time()-start print(f'Training completed in: {end//60}m {end%60}s') plot_graphs( - train_values=trainLoss, valid_values=validLoss, - save_path=save_path, x_label='Epochs', y_label='Loss', - plot_title='Loss plot', save_name='trainLossFinal.png') + train_values=trainLoss, valid_values=validLoss, + save_path=save_path, x_label='Epochs', y_label='Loss', + plot_title='Loss plot', save_name='trainLossFinal.png') plot_graphs( - train_values=trainAcc, valid_values=validAcc, - save_path=save_path, x_label='Epochs', y_label='Accuracy', - plot_title='Accuracy Plot', save_name='acc_plot.png') + train_values=trainAcc, valid_values=validAcc, + save_path=save_path, x_label='Epochs', y_label='Accuracy', + plot_title='Accuracy Plot', save_name='acc_plot.png') return trainLoss diff --git a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py index ceb1dea3f15..eafe6af4a71 100644 --- a/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py +++ 
b/misc/pytorch_toolkit/lung_nodule_detection/src/utils/utils.py @@ -5,32 +5,34 @@ import json from .models import LeNet, R2U_Net, SUMNet, U_Net from openvino.inference_engine import IECore -import onnx import onnxruntime + def ch_shuffle(x): - shuffIdx1 = torch.from_numpy(np.random.randint(0,2,x.size(0))) + shuffIdx1 = torch.from_numpy(np.random.randint(0, 2, x.size(0))) shuffIdx2 = 1-shuffIdx1 d_in = torch.Tensor(x.size()).cuda() - d_in[:,shuffIdx1] = x[:,0] - d_in[:,shuffIdx2] = x[:,1] - shuffLabel = torch.cat((shuffIdx1.unsqueeze(1),shuffIdx2.unsqueeze(1)),dim=1) + d_in[:, shuffIdx1] = x[:, 0] + d_in[:, shuffIdx2] = x[:, 1] + shuffLabel = torch.cat( + (shuffIdx1.unsqueeze(1), shuffIdx2.unsqueeze(1)), dim=1) return d_in, shuffLabel + def dice_coefficient(pred1, target): smooth = 1e-15 - pred = torch.argmax(pred1,dim=1) + pred = torch.argmax(pred1, dim=1) num = pred.size()[0] pred_1_hot = torch.eye(3)[pred.squeeze(1)].cuda() pred_1_hot = pred_1_hot.permute(0, 3, 1, 2).float() target_1_hot = torch.eye(3)[target].cuda() - target_1_hot = target_1_hot.permute(0,3, 1, 2).float() + target_1_hot = target_1_hot.permute(0, 3, 1, 2).float() - m1_1 = pred_1_hot[:,1,:,:].view(num, -1).float() - m2_1 = target_1_hot[:,1,:,:].view(num, -1).float() - m1_2 = pred_1_hot[:,2,:,:].view(num, -1).float() - m2_2 = target_1_hot[:,2,:,:].view(num, -1).float() + m1_1 = pred_1_hot[:, 1, :, :].view(num, -1).float() + m2_1 = target_1_hot[:, 1, :, :].view(num, -1).float() + m1_2 = pred_1_hot[:, 2, :, :].view(num, -1).float() + m2_2 = target_1_hot[:, 2, :, :].view(num, -1).float() intersection_1 = (m1_1*m2_1).sum(1) intersection_2 = (m1_2*m2_2).sum(1) @@ -40,18 +42,20 @@ def dice_coefficient(pred1, target): return [score_1.mean()] + def load_model(network): if network == 'unet': - net = U_Net(img_ch=1,output_ch=2) + net = U_Net(img_ch=1, output_ch=2) elif network == 'r2unet': - net = R2U_Net(img_ch=1,output_ch=2) + net = R2U_Net(img_ch=1, output_ch=2) elif network == 'sumnet': - net = SUMNet(in_ch=1,out_ch=2) + net = SUMNet(in_ch=1, out_ch=2) else: net = LeNet() return net + def load_checkpoint(model, checkpoint): if checkpoint is not None: model_checkpoint = torch.load(checkpoint) @@ -60,14 +64,15 @@ def load_checkpoint(model, checkpoint): model.state_dict() return model + def plot_graphs( - train_values, valid_values, - save_path, x_label, y_label, - plot_title, save_name): + train_values, valid_values, + save_path, x_label, y_label, + plot_title, save_name): plt.figure() - plt.plot(range(len(train_values)),train_values,'-r',label='Train') - plt.plot(range(len(valid_values)),valid_values,'-g',label='Valid') + plt.plot(range(len(train_values)), train_values, '-r', label='Train') + plt.plot(range(len(valid_values)), valid_values, '-g', label='Valid') plt.xlabel(x_label) plt.ylabel(y_label) plt.title(plot_title) @@ -75,31 +80,33 @@ def plot_graphs( plt.savefig(os.path.join(save_path, save_name)) plt.close() + def load_inference_model(config, run_type): - if run_type == 'onnx': - model = onnxruntime.InferenceSession(config['onnx_checkpoint']) - else: - ie = IECore() - model_xml = os.path.splitext(config['onnx_checkpoint'])[0] + ".xml" - model_bin = os.path.splitext(model_xml)[0] + ".bin" - model_temp = ie.read_network(model_xml, model_bin) - model = ie.load_network(network=model_temp, device_name='CPU') - return model - -def create_dummy_json_file(json_path,stage): + if run_type == 'onnx': + model = onnxruntime.InferenceSession(config['onnx_checkpoint']) + else: + ie = IECore() + model_xml = 
os.path.splitext(config['onnx_checkpoint'])[0] + ".xml" + model_bin = os.path.splitext(model_xml)[0] + ".bin" + model_temp = ie.read_network(model_xml, model_bin) + model = ie.load_network(network=model_temp, device_name='CPU') + return model + + +def create_dummy_json_file(json_path, stage): test_data_path = os.path.split(json_path)[0] if stage == 1: - img_path = os.path.join(test_data_path,'stage1','img') + img_path = os.path.join(test_data_path, 'stage1', 'img') else: - img_path = os.path.join(test_data_path,'stage2','img') + img_path = os.path.join(test_data_path, 'stage2', 'img') file_list = os.listdir(img_path) no_files = len(file_list) train_list = file_list[:int(0.6*no_files)] valid_list = file_list[int(0.6*no_files):int(0.8*no_files)] test_list = file_list[int(0.8*no_files):no_files] dummy_dict = { - "train_set":train_list, + "train_set": train_list, "valid_set": valid_list, "test_set": test_list } From 09b8f4eaceecbfe558d99ac1be5f0c7e5b191de0 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Fri, 10 Feb 2023 04:16:46 +0530 Subject: [PATCH 10/47] Add files via upload --- .../README.md | 142 ++++ .../configs/download_configs.json | 16 + .../configs/fl_with_gnn.json | 40 ++ .../configs/fl_without_gnn.json | 40 ++ .../init_venv.sh | 29 + .../media/architecture.jpeg | Bin 0 -> 37193 bytes .../media/results.jpeg | Bin 0 -> 118242 bytes .../media/scheme.jpeg | Bin 0 -> 70533 bytes .../media/scheme.png | Bin 0 -> 279578 bytes .../requirements.txt | 17 + .../setup.py | 5 + .../src/export.py | 33 + .../src/inference.py | 39 ++ .../src/train.py | 43 ++ .../__pycache__/dataloader.cpython-310.pyc | Bin 0 -> 1528 bytes .../__pycache__/downloader.cpython-310.pyc | Bin 0 -> 1285 bytes .../__pycache__/exporter.cpython-310.pyc | Bin 0 -> 2458 bytes .../__pycache__/get_config.cpython-310.pyc | Bin 0 -> 806 bytes .../inference_utils.cpython-310.pyc | Bin 0 -> 5082 bytes .../utils/__pycache__/loss.cpython-310.pyc | Bin 0 -> 2906 bytes .../utils/__pycache__/metric.cpython-310.pyc | Bin 0 -> 1029 bytes .../utils/__pycache__/misc.cpython-310.pyc | Bin 0 -> 3135 bytes .../src/utils/__pycache__/misc.cpython-39.pyc | Bin 0 -> 3138 bytes .../utils/__pycache__/model.cpython-310.pyc | Bin 0 -> 6160 bytes .../utils/__pycache__/model.cpython-37.pyc | Bin 0 -> 6431 bytes .../utils/__pycache__/model.cpython-39.pyc | Bin 0 -> 5565 bytes .../__pycache__/train_utils.cpython-310.pyc | Bin 0 -> 11837 bytes .../transformations.cpython-310.pyc | Bin 0 -> 476 bytes .../src/utils/dataloader.py | 53 ++ .../src/utils/downloader.py | 33 + .../src/utils/exporter.py | 63 ++ .../src/utils/get_config.py | 23 + .../src/utils/inference_utils.py | 213 ++++++ .../src/utils/loss.py | 75 ++ .../src/utils/metric.py | 41 ++ .../src/utils/misc.py | 123 ++++ .../src/utils/model.py | 237 +++++++ .../src/utils/train_utils.py | 659 ++++++++++++++++++ .../src/utils/transformations.py | 18 + .../test/test_export.py | 94 +++ .../test/test_inference.py | 73 ++ .../test/test_train.py | 59 ++ 42 files changed, 2168 insertions(+) create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json create mode 100644 
misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/architecture.jpeg create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/results.jpeg create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/scheme.jpeg create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/scheme.png create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/export.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/downloader.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/exporter.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/get_config.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/inference_utils.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/loss.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/metric.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-39.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-37.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/train_utils.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/downloader.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/exporter.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py create mode 100644 
misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py
 create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py
 create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py
 create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py
 create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md
new file mode 100644
index 00000000000..c0109ab3715
--- /dev/null
+++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md
@@ -0,0 +1,142 @@
+# Federated Learning for Site Aware Chest Radiograph Screening
+
+The shortage of radiologists is inspiring the development of Deep Learning (DL) based solutions for detecting cardio-thoracic and pulmonary pathologies in chest radiographs through multi-institutional collaborations. However, sharing the training data across multiple sites is often impossible due to privacy, ownership and technical challenges. Although Federated Learning (FL) has emerged as a solution to this, the large variations in disease prevalence and co-morbidity distributions across the sites may hinder proper training. We propose a DL architecture with a Convolutional Neural Network (CNN) followed by a Graph Neural Network (GNN) to address this issue. The CNN-GNN model is trained by modifying the Federated Averaging algorithm: the CNN weights are shared across all sites to extract robust features, while separate GNN models are trained at each site to leverage the local co-morbidity dependencies for multi-label disease classification. The CheXpert dataset is partitioned across five sites to simulate the FL setup. Federated training did not show any significant drop in performance over centralized training. The site-specific GNN models also demonstrated their efficacy in modelling local disease co-occurrence statistics, leading to an average area under the ROC curve of 0.79 with a 1.74% improvement.
+
+The figure below shows the overall schematic diagram of the federated learning scheme proposed in [1].
+
+## The proposed CNN-GNN architecture
+
+Separate Fully Connected (FC) layers are employed to obtain a different 512-D feature for each class. These are used as node features to construct a graph whose edges capture the co-occurrence dependencies between the classes at each site. The graph is provided as input to a Graph Neural Network to obtain the prediction labels for each node. The entire CNN-GNN architecture is trainable in an end-to-end manner (a minimal sketch of the site-aware weight-averaging scheme is given at the end of this README).
+
+## Results
+
+The overall performance of the proposed CNN-GNN is summarized in the results figure below.
+
+## Model
+
+Download the `.pth` checkpoint for the CNN-GNN model trained on the CheXpert dataset from the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/high_low/weights.zip).
+
+Inference models will be made available in the [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public) as well.
+
+Note: The exported ONNX and IR models accept inputs of the fixed size specified in the configuration file. Update this value to match your input size, as sketched below.
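To make the note above concrete, here is a minimal sketch of exporting a trained checkpoint to ONNX with the fixed input size read from the `export` section of a configuration file such as `configs/fl_with_gnn.json`. The `build_model_from_checkpoint` helper is an assumed placeholder rather than the repository's actual API; adapt it to the real model-loading code in `src/utils`.

```python
import json
import torch

def export_to_onnx(config_path, build_model_from_checkpoint):
    """Sketch: trace the model with the fixed input shape taken from the config."""
    with open(config_path) as f:
        cfg = json.load(f)["export"]

    # build_model_from_checkpoint is a placeholder for the project's own
    # model construction plus torch.load(cfg["checkpoint"]) logic.
    model = build_model_from_checkpoint(cfg["checkpoint"])
    model.eval()

    # The ONNX graph is exported with the fixed size from the config,
    # e.g. [1, 1, 320, 320]; change "input_shape" there if your inputs differ.
    dummy = torch.randn(*cfg["input_shape"])
    torch.onnx.export(
        model, dummy, cfg["model_name_onnx"],
        input_names=["input"], output_names=["output"],
        opset_version=11,
    )
```

The resulting `.onnx` file can then be converted to an OpenVINO IR (`.xml`/`.bin`) with the OpenVINO Model Optimizer.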
+
+## Setup
+
+* Ubuntu 20.04
+* Python 3.8
+* NVIDIA GPU for training
+* 16 GB RAM for inference
+
+## Code and Directory Organisation
+
+```
+chest_xray_screening_federated_gcn/
+  src/
+    utils/
+      dataloader.py
+      downloader.py
+      exporter.py
+      get_config.py
+      inference_utils.py
+      loss.py
+      metric.py
+      misc.py
+      model.py
+      train_utils.py
+      transformations.py
+    export.py
+    inference.py
+    train.py
+  configs/
+    download_configs.json
+    fl_with_gnn.json
+    fl_without_gnn.json
+  media/
+  test/
+    test_export.py
+    test_inference.py
+    test_train.py
+  init_venv.sh
+  README.md
+  requirements.txt
+  setup.py
+```
+
+## Code Structure
+
+1. `train.py` in the src directory contains the code for training the model.
+2. `inference.py` in the src directory contains the code for evaluating the model on the test set.
+3. `export.py` in the src directory contains the code for generating the ONNX and OpenVINO IR representations of the trained model.
+4. All dependencies are provided in the **utils** folder.
+5. The **test** directory contains unit tests.
+6. The **configs** directory contains model configurations for the network.
+
+### Run Tests
+
+Necessary unit tests have been provided in the test directory. The sample/toy dataset to be used in the tests can also be downloaded from [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase1/phase1.zip) and [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase2/phase2.zip).
+
+## Acknowledgement
+
+The model and architecture were first published at the 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI) [1].
+
+This work is supported through a research grant from Intel India Grand Challenge 2016 for Project MIRIAD.
+
+**Principal Investigators**
+
+Dr Debdoot Sheet<br>
+Department of Electrical Engineering,
+Indian Institute of Technology Kharagpur
+email: debdoot@ee.iitkgp.ac.in + +Dr Ramanathan Sethuraman,
+Intel Technology India Pvt. Ltd.
+email: ramanathan.sethuraman@intel.com
+
+**Contributors**
+
+The code and models were contributed to the OpenVINO project by
+
+Aditya Kasliwal,<br>
+Department of Data Science and Computer Applications,
+Manipal Institute of Technology, Manipal
+email: kasliwaladitya17@gmail.com
+Github username: Kasliwal17
+
+Rakshith Sathish,<br>
+Advanced Technology Development Center,
+Indian Institute of Technology Kharagpur
+email: rakshith.sathish@kgpian.iitkgp.ac.in
+Github username: Rakshith2597 + + +## References + + +
+
+[1] Arunava Chakravarty, Avik Kar, Ramanathan Sethuraman and Debdoot Sheet, "Federated Learning for Site Aware Chest Radiograph Screening," in 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI). (link)
+
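For readers who want to see the scheme above in code, the following is a rough sketch of one communication round of the modified Federated Averaging: only the shared CNN backbone is averaged across sites, while each site keeps its own GNN head. All names (`site_models`, the `.cnn` attribute, `train_one_epoch`) are illustrative assumptions and do not correspond to the actual functions in `src/utils/train_utils.py`.

```python
import copy
import torch

def average_cnn_backbones(site_models):
    """FedAvg restricted to the shared CNN; per-site GNN heads are left untouched."""
    avg_state = copy.deepcopy(site_models[0].cnn.state_dict())
    for key in avg_state:
        stacked = torch.stack(
            [m.cnn.state_dict()[key].float() for m in site_models]
        )
        # Cast back so integer buffers (e.g. BatchNorm counters) keep their dtype.
        avg_state[key] = stacked.mean(dim=0).to(avg_state[key].dtype)
    for m in site_models:
        m.cnn.load_state_dict(avg_state)

# One communication round (sketch):
#   for model, loader in zip(site_models, site_loaders):
#       train_one_epoch(model, loader)      # local update of CNN + local GNN
#   average_cnn_backbones(site_models)      # share only the CNN weights
```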
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json new file mode 100644 index 00000000000..de5f3f223d0 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json @@ -0,0 +1,16 @@ +{ + "data":{ + "dest_path_data": "dataset/data/data.zip", + "url_data":"", + "url_split": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase1/phase1.zip", + "dest_path_split": "dataset/split.zip" + }, + "fl_with_gnn":{ + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/cbis_a1_b1.zip", + "dest_path_model": "model_weights/with_gnn/checkpoint.zip" + }, + "fl_without_gnn":{ + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/cbis_a1_b1.zip", + "dest_path_model": "model_weights/without_gnn/checkpoint.zip" + } +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json new file mode 100644 index 00000000000..9a761af296b --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json @@ -0,0 +1,40 @@ +{ + "train": { + "data": "/storage/adityak/chest_x_ray/", + "split_npz": "/storage/adityak/split.npz", + "batch_size": 8, + "epochs": 1, + "gpu": "True", + "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "lr": 1e-5, + "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "savepath": "/storage/adityak/wt_without_gnn/", + "backbone":"resnet", + "gnn":"True" + }, + "inference": { + "data": "/storage/adityak/chest_x_ray/", + "split_npz": "/storage/adityak/split.npz", + "batch_size": 1, + "epochs": 5, + "gpu": "True", + "gnn":"True", + "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "checkpoint": "/storage/adityak/wt_without_gnn/modeltry.pt", + "backbone":"resnet", + "max_samples":10 + }, + "export": { + "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "backbone":"resnet", + "split_path":"/storage/adityak/split.npz", + "input_shape": [ + 1, + 1, + 320, + 320 + ], + "model_name_onnx": "modeltry.onnx", + "model_name": "modeltry" + } +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json new file mode 100644 index 00000000000..e26c05f9e7c --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json @@ -0,0 +1,40 @@ +{ + "train": { + "data": "/storage/adityak/chest_x_ray/", + "split_npz": "/storage/adityak/split.npz", + "batch_size": 12, + "epochs": 1, + "gpu": "True", + "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "lr": 1e-5, + "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "savepath": "/storage/adityak/wt_without_gnn/", + "backbone":"resnet", + "gnn":"False" + }, + "inference": { + "data": "/storage/adityak/chest_x_ray/", + "split_npz": "/storage/adityak/split.npz", + "batch_size": 1, + "epochs": 5, + "gpu": "True", + "gnn":"False", + "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "checkpoint": "/storage/adityak/wt_without_gnn/modeltry.pt", + 
"backbone":"resnet", + "max_samples":10 + }, + "export": { + "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "backbone":"resnet", + "split_path":"/storage/adityak/split.npz", + "input_shape": [ + 1, + 1, + 320, + 320 + ], + "model_name_onnx": "modeltry.onnx", + "model_name": "modeltry" + } +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh new file mode 100644 index 00000000000..1edc95f3985 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +work_dir=$(realpath "$(dirname $0)") + +venv_dir=$1 +if [ -z "$venv_dir" ]; then + venv_dir=venv +fi + +cd ${work_dir} + +if [ -e venv ]; then + echo + echo "Virtualenv already exists. Use command to start working:" + echo "$ . venv/bin/activate" +fi + +virtualenv ${venv_dir} -p python3 --prompt="(compression)" + +. ${venv_dir}/bin/activate + + +cat requirements.txt | xargs -n 1 -L 1 pip3 install + +pip install -e . + +echo +echo "Activate a virtual environment to start working:" +echo "$ . ${venv_dir}/bin/activate" \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/architecture.jpeg b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/architecture.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..18408892144944e68ea64a6f4db240c01a6c1ffb GIT binary patch literal 37193 zcmdS91ymi&(l9z`aCdiyY}_^kcXv&2cL+{`I|O%kcMB2-?k)iW1b2rJB+0*Xa?bVp z*8A4C-g@irJ$t6Qx~jXXySlo%XMTSA`31m`laiGJpr8N%3i1Pft^&aTJS07Xe};hY z3<(+XLq|hFK|{yGz=ZtpsBj5!Ar}=H4H*gK&%;DV$HXJd#la=4AR(dfO7E4gZzSsT|0TB=y=9j2{OR#XzFz`^%Ah>_V15nT~uy8+D z02CMq4+abd06-u4|Bdbc76598FyiPDxPmeY;zNl0A4}aAfBc>-+V`YweU$FS^LdxA zG1->%i)!MeBF1>Kh09`0Dv?Q4#R%nMOd2pYB-?nLzrbI(dpZGH3P&2)rn9nUk5Y4{ zuio|spLF||s&F%OUBBqjN1u~{^{W|QCTg>@VvWbvsV!Wx&0*|mf078X3*l*5s5VcP z(iYs3-}^)*@?WF|?Km(#)Lqx?IBc0;yEkh=M5CQGwWiB*TYKgJJ6Nk2qf6MNy8hhe z2(*$-r{PDZ6`NK3Wz&S&$c;L;A&a6Efw33G`t#uX$IYe4$;MG-?e>)8sC;N&WQj|C z*J&1Rch|lXubNa+7*Y)So@UF)l@GlidKbbbZjuD|OMbNX*ORP5y)u4fY!hNv-B8_Z zeP>KvN-XP4dv%6M@tPIVxiQ~2AN;D(^yXFB?kTarq;geJs6c-r@u%>w zdN;i)O*2|P`;P3?BQeA`rfl5;BVbaFxF>~q0=OfDtU!8-#mow`!IS;mSfY2_pR`wK z-IDO0I^+U`;Ve9>-=7j+9rZp}A9uSq;Fy7Eqvcs$dBS|cs^i8x?_=38iE@yM8Jeg4 zN&AytzCN>ErM;eS!|FmzQ(MA4?{5=CG*<{$!nZXf#?|P?e0xpl)lGU@9M^HI23H zvR2xiG6_~V<}e-&mpQVfy}XgZZdG3C~>yK2S)SY}dq_QEDvD`cR<`is*m8a~l zjN2*{>LTDAHhZFc1BRNAMOT42`}JMB zC?-}`CfILyRllsPxwhf>-uUE0ho-qKnW zP&+XMi$0WnvryOyivtG9k!?Dajr3LOJIo<&}4gmFSyNYkpB-QX%0B^68)o?82?i5f!FS+ zmtJ-BR`)aeKbs={&8qsUz%f?8>k*t^h+jwrZbQpx%RH@v*GwxYe@BP{6CYLT{))sGBA-+MmUV1U5 z@)*zE=nl&Ln`k7tRR+>DqfD}ya=~G-jg)l5slv>O>V#%}UF(RZ1@W2dOv{*Ea&2~P z?Ri&rh`tsVO{S@A^|X@HI*+kpCqeqonC;?ZdjMF%Y1wH@QFE^r)QK)SH!#TbVqJeVmfnmM`mB&e4bMDOhz+J)yc@* z?0ESt#V!c>^|2SU9g|+osGG*vLalWh?xL^+om2w1eN8<81@DICMmRm#`70(-ki;6;oDoRh_7G2lp#%{tQ34RX%mOp69)-L3AY@!R;AC^&XBI7d78&+3URs<$zrhBrJ+wEyeF{Y1||KV() z#dk=uSssoRJ^P@nojq9q_8Y|2RxmGAj!bmF{NQa~@Q00GFK2t|%SSwaS3veDO=6{t zH|OSDnWAeF#q%~Hu%HZ@TwYb`8K618k2IC^?cRaNLo$gBT?7Ca9m1aZykY)2hN%|( z$}GHX9JifKnqGecfcV?-W2H3n*P0b3I8!1L{t%%d4mMb~Ikh^i*q=RE82}`b@pOm1 z{KQu$SIcd7Ii4#ge~mTl0RU#Zx>)%>b}R!@PJ&hEyOx;7Jmq4D1>z(D{NMGdZ@(OQ zDb+HE3IG7cL9N%1Xpa?*RXplkquxxnCRaD^K3&8gNVlK$xtD=uGr-^I|+_D3L#a^lK^0WPN{YY7cStioG;{HV%1U z?!d=vamCHG&Xled4(0t-a0M4)e@s>~*;q=f-!A{b&+Fk4=|@)0sZF()JCYtmxE??< zSy!jgR^BLGI*uQ$^BKcyqkyrmPd}1s8>&<1Vx4cNA|eFA*0%K%4J>8-aZppnIXRs? 
zHwLBJ5c>uI1V{iuN-&kGZjWsOng;-_Wv!KF;->M#!C3`a-;Q9Jza;sW=S*j;_?S)3 zx{v+`Zz&?H+U43p!Ax)F9HADjU3sZYd@K`|AR7KqqFedUY->81Rr7jsPFwBJqf8P+ z6fUgF(t)(S*)0zvaFc-CWKgC`pUxHKH~^E|$10>_K6~Y7;ep}?vCdh;>otO@bX6ya zkM#%O?33*)Ri@r*@%$R@&r~A+BGX@4MjQJIAO2H^L$>?d{o@nn2bYfKiss!I1=zv-Z1ZwA2SBLOun%lL%wx&WZjf1szHb09aY)dRq1R5n-2n$y79{j$%(39oLu zY+a?)QV+I|SbsJ1FHa;f1H>QeX`hmt4dlP!*QndGzfysqAPF`<3a1wi0{8=n{=oGVw+f&Y*9#p^;SWpE z;|~Cm06;+qQL3`%mr|h=5K0IWC~*>F8nWHa>%|q31gNL^;OJWNVQxUM02G2h0H}%p z2xvV}2R1u#a13xOzm)=z2|$xLpt3wDM{_lVi*U>e0L~hPLJOnWGwxUOI3fU=Q`%1; zS}AS*!nPQh^kRkr(iW5`NaP(t9sn6t99TdMflmZV1Oow?9U26Q889f|APNIlAPRq^ z9)Q5eATRhA4}jww1k6Z7ih|j7{C^=h;zax@U@n&Oeq~^x10i=B$auqCKmhzWHVO)V z2!J7mfZ-3o0yGf#&je}Yi+?;M1Kp1l=nq*tC?IeZ1PeEbfKE^-0?GZ61{ffmzY~0r zI0=XW9YF*Gk}ne>fbcem0Duq3aIao%l|)6{0Ec>0Eqnu^#2-#{%>vy<9Z7Gf7vzu z{rrC}^98xJ_9R6eWE6s6L4ELVCrS2|-7i15-sE;3G(Y{f9(ci|J-5p>Z0~z91isGG ze*6h=KTbV;4STx&2{2^-Hz~# z9xhQB(DrEO-x}XhdS&a#_Aop`OwMvW#eneq?R7DPVai5->c!Q&=HGM$=opl8G*|Dy zBaScrz);qOy|JF+yKs?jgw#9w*<L$_a18|)PC+A3e1J#- z{;~h`@NVIH5bQL@MGaw#1VgZB+7MPq2BO=P@^3~2tU*{PN#^SwcpUP_EYWnw5whE2 z-kD62>nSeRDR99%hlEw`TaH|{NMo#zKG*(jY>cAFRli0Ff=R}q8S230HnvJgod83$xpZC z>XDVfS3}^rNj?Zs>lkXsWa{Pq=Yl(e-d{qp-2%HI z@H>Xf2?!09K18Ms2*B`N5K^rC=e2tAwWSxLA?Ne0uwWR zEbnKCdHB9)#Es!`=NnuMmwm|)k&_yK=3h>gzyXu5=KJ=6uJ^0ux5(8GpUL5?!{+7p ztKY9sUHP>SQKM9_e?y`O3S=KKM^bbp;s?}~2~32Du!H(-z8_Mj&vTsIP+SN-UoU1w zgqCMm0&`a0)ylm4vH1LZx3{cuwuIC&PKaxnrCAInQUua06 z(d}2WHS~3kTpi!qBV{bJ+UVX;I3a){#@u3;yy&rZjOEo9XumOT+~s>ZkSFvbJk-o? zTo+_9GgczFL$VL$a1-tuvqu55j~(&dUWcg$D0d7ECZ|5C}R#|erk;N$RTFP4YBdzUwfBQI2K zcO2(!P4V$&4^))-*PYk&wWMs-s`5D6FZhY?9oqDLLmqPE$(EmGY4o%9?zUn2>wAry z5md&0a1``=o3|=aGk)IAMd-MCS)J@m^K@_8hFRo^^e^M+BC}%ZegR)dvLe+s0`_9b z(_t+X;oYX+vN8~nffh~p^km~1TrYrumakMynsf>KPT$<0#boG>A{Qt3cu)yO5@oZ) zTIWJ%ExlkFN>zx5Po=H@^H-!_u(gD0p1-4klMLzLa;`&u+SqokuFU;nseFC*66P(M zyzu)E2DwF7%JX{n#J5DtBEG)r19Yznk;RaPaYQ}tp=P%Hc~rq6+k@+J6xni_RY6k! z6L8h6zFExgH}*DgVLH0fF9;RsA|t5OzdzMX0kzT7Q`OY$zJF~E3=^yki-09D5Mg6w z48W>wUo?pD+7)X0TXfSHKD$UUEvfa&hZ^o|c2roPb4Byv(Jy{lk~0OducK+N^CQO6 zTJov(XSC<$wk0TIcWnOF-9a*>{eT4t%k$2c*#?ER`(x+ZDcW3v4L0RjFZv@^-1x%`4NkZz`S5%$2zczP47U5^n4wF6el-3+Xx>2?$?ogc{u|197`{ zG$(EcCiflQJxXnk{9)sIO;L>n>@_-coBDS3wtnKzWOo{mGj~TGlxe<8*2Z_MR!8JY zWx~+E`c_>s+Z>pV!??)*kA|(VEmym3J zq@=*MdPk?~mKWCI<~&D#KN&324RiO&i`|zY9^b>B%2o^+XnMb((c{`<)mtW3DxRKS zkjE%8D^d%3+f26syp7(;x)BP2xj_TY)fT$<@K>ciGA#K@9NI; z`752T!{Dv#Q~q0v&-b&uWZcdxbl0s*XIen`^8f#A8gtuV>39JZb!3Sh4_p=5|Tp-C7rPZ;_BpdRNP=_Gr%@nd& zvOKt5P|UTyu-b(>%|eDcqPr!c=v_eh=9Z6U;qL;V(K|Zcu;+L*J&B zvnBOreW-cha;G&?m!-Oi2R`U&;7xLKd}5l`5d8Th@0H!7U^1OLQZj?c!g*zs?&&HP z9cRy-#-D?oOgv&uq5f{`e2cSVO ze3|EJq)u&r0&rztlV^FV%6|e^F9vk6xfAd=sz~mf3(fRb5`|C+kprd9=whqB!lSG8s)OM~ICKZZ+Fa8NoU+jMJvs+iy_oxgTw>7@Kx@UHHjp=om z9Lv>$P!7_?jpYrxEDDvSCBjLJ1yq+?^IgX?D9R}U5gug78&y2=P|K@p9q@|FnS|55xo`B10J5LZ2x zsmuHA$ONxqK$e<^&!R9xCxZRGnlF=l#4ZE_BBPIkmVvIan_a{J4oOeEc&anTkmU+X zg09IOk>Qr(b^_wym4J1>+ooRF7UXE-HlF;U1ky$U3T>5d=U;5j9&D@NdsIJ4(+$(w zogr)yd}q30XmmkJFjJGExzabBu>1QEpJ{S9UxxI4OA*i6~$3eX5^f6 z@`l@aX=2~8_Akf$OHT=Z4jEjwnZ|d~dAU#@idW-fYpDG8U&}Yw(hQ^E_=MHO4j&yk zi4Ipb8Ok%E1qZ3zkzF3IY|k1^q1vjM6`FztE6`DN^|jaP@f~aK{43uY8Os!C1^c4-K74^BpqAAWEV%Qwog-8cGAzr*-<|)ZDa6g&}TcvqfO} zmMpchVVXPX-cZLxGD}VzZiH$F5~-wtdjw>i`VnL! 
z5&8A>>2dD!_FZ)fLPIoWP~vTM`D*c#aB^b@4w>GHP|I81N@kw286|Ggp8%BJyyaV% zclu_gbLyiuXS{FB3U;dev%js$>8+g>%(vN_hjqsuwPzPMcA~Kt_rP4;8X>vc5jDmv z57=`fwS3|6;CaH_IKOq5p&Q*nxdaR2zKO3cOoD}>I%Ci#@zehmCu%NwouG=at2l}L z1mH^vat!Yea}U_x$1DtTph@Z~S+>d|02!Svy0sgeQt3t@r60J~1hMN}XarFGPTWlj zIl02SbCpAHk>C~edk?>z=e-i_85&F)Nh}v3CsL=JEQYz900yEO{6%^;^az7&4DW|G zL_Q?QIi3+#D;H%GKXK~iD!DmcOxXBQ9Bgc6uzVDqmW6CNN)r&NlT{MXoa43bFsHEE z$-vTKbW2^j2!Wh&XgUuqGab6CaUA>&MV8O}>^02xpw>>aoClTBwap5-8xIGyR7AjN zaP|Z!TET~!V`aVPo7sOFda1`EjhYB+acKVMPZxhq&l}d{PbN=qnwtgPy2EB$JWPVd z^rc&-_8xUgQmE5Dm)~imF1fI!q=!FsS+!)(t5z@?F!Zcx^ST-&b70PF1{{6#a)wTh zxj-CQQ-Aq+GJ__On*WQCzP8R8l{+6)0QXNoWjW(^_x0NjsfY9+MQSv-5%TH$6<(D= zYAGn`_LB6yxZxi&Wq(vnjY=JQl%ky?75^>g#kYOP?oJpJfP#U1RrCxN@%QHL*X|Az zfW?Ar?l?tV;IJvEI8>c+K*sgdTq2N-oohh6TK-2GBNL~5P{x#q4U716{wBq7`s0XtvXbl=GxY2T0im@nd+VLsL{D-!*b&}!&rq9>++ zP>Pc>jNGQfwKBa`SF&riq}Dz8>T7XbGXi=|IuoT)dT#-}=}xEt#YgDCbw*g>8m?v9 zChYIj7tH%ATZzHr3JV!)a-jecU#=^}}d_g*)3{&5# ztkbd%k5e)T(}l$nXNAtLz0~m)yW)oL-Ot(NjVOhwB@)asOg8NqFkH46k&tCnrX$~f zes10)+vYC2M2{39(kKAz;Vseo5+JzpN&HojRpm(EWpbBq-qx zPJ9R>F}YLQQS#+zloAvzy==*%N>k7dj0(PF+d{4k3 zfuXMX7-NPqvZ=tk(3EtK1nEeVIbOCb)%9<0Ga5Mzzqc4T3<?_1tgyh1`09aD?FEcx4{HU?o?aiivC&j+*dheC|8^USIKQ+%n=KlUwtY+4yjbY zVz)H{F)PgCY`V?4aG>97#`rL{`qhBma$H_3pOv5)Gy;_*)5ioenUd-Z+g{FVK{gNM zPv2zQbyNc)+XW~;fu+V`KJ6<{nd&dyhmE$23GBEmq=~$zq?023n*mq}l#PJ6UXT?0 z(Z!L+UiMRsMIN$yCzQ5w!3>c^cIp!}aavHdR;Jh3FQlX0B5eNQ4u*`o8h-# zGeyLpr6%Jqc#mugJC(e(WzJ?M3`sj*J^xe#5oQazhAhqBGot~(Hxh{-bpNNWjH?WW zAtR$>nx`>9VT?W8=4_66`CYtW-)WzfMra`pEHI~E!XGN-w^5Az~SR z4aIx~aJR*HRXVe41$RT{X9m>0`#k2FTe95`qia}(`?kr}q$KLOCa z=274)S?V**$_V{?nEX%26HT;I)n|HgDM-od8WGE`ZLTSG3DX!{o5JeZ-0ji~kLZmM zH~Rd0jG)7+jWLx%EkF0kgcB0^JyC}-D;K34#|OH&qD?&w6Vn%R_Ve@rhZ=t`UvM{qca5eKRAW{P>&i7D~@tjcv3EzEZNQVJKBIq2i zAuX8kWzx}N%Y06;i65f}vVu7B_*_L)F*EEwyOz8Tnz4;p42tjJ(nbgkhqQ<8Y_Mjz zju#)Num)-`a^z7!){~}NDNf7iYKT;_Lvv=J+NZQRIsH^hIzsFTZg&GV=q;n3(P(o8 z;+$d(Ow6Q2u7?_@Dja$=lwmIDD07{gQMycgQawtmaLo9!_dlB6!bdt>mVOA|PW{Bz zn)3NkK-w+Gq4zae6BjW~9}`UywC!@*Q`1r-`@MC(Lk~r0|6I4!mf;tl%Ade9n^m!G zEYTva6)+=oN-&F90xD(6v^fKeF^Fk%SD?YHe}_j`Sfwt|r{eAmvBPyfqmib3IX%8C zCaTU}j+TkBgf-K`lykq~oBS!df2E1{B_|Xek3gS8xXKMz6ug=!P4*xDltEUMX5f#Z z{lf!T%A6%mh+xY!`*~CRxK|V4w;Ze!V#H&iS5dCDmqn75osKk?Lc-mWdRZ87+9f0( z|D7&zi>zs1LpxYHJRLU-`I9u9bhkI$ScQOsQnNX^EcxZhhU2t}h(bmaMP~vy*6EqW zuL(o_8hf8G!`3ZU^(%ArZmaMRk+?rx1Td>F24;j=>-wx`NEUf>8qe+2o1u%jCO`xm zL@}USSKLOTPyX5{D2owj_fl_n}6))O0<@}NuisgY>t>hv`I!UI@ zqy(fMs)V9)N?8uH1zIMIJcN8}2b5bA$Xm73_AmC$EEhOi^IMTgX4xs|i8nR^C>?A} zY}r_R_r4Rl;m2sYgTZfOCl0ALJGYryur4^b_+^ALf4SQv`pP(ZU66J4EWh-Z=g@(v z(rY6I&7Pudy5vX=5A96H$7tRQr3k>=J){OpWW(R``e@Dh@s zPvR#y>2a0~Cy(m!*ji=@hv9RLKDVJ=aQcjrH{%4dla`IYW^G0^35|leqQeYikyH~) zP;;ZIAx3R4f?-zx>$fQI6vlIKl`h0AFd^i?SF^uH-JxRc5WxYaROZc>y=3vxMq)20 z`WGSt+k1rxzdDAa)44H3Hn|dSa~RWeF8^8`W#ybUK?*WsFH5Ie4o*jnojqCBiwx(mP9e5%Qu0eBuOvU zEh1(TxJe^!Q~-}BL2Kru;-A-h`A1*}`EvdR^iSXdRV`4hm<&p1S=WK~tJJ96 zk2m9JZ7G>*3whQtNhw>e%&6v4)e_=;_)FgXc$U7fen9d}3h}$*nH|dc$!)~>n74VT z#}KeZLcdIgchvfxSGih4+Sj!7J1W`T*J!t&KmqmokD1uq8wsC{752Fw;qQDNr3Bv2 zV4lCV8d83s5;{L6IQ$7nNOyfNBzEt|Bxz0&N6UR7p z3{5}uaa1*CGp>>{@z48xP+RmHq)92cW{AtU)S~&M?#lEMWUrfV&-_7okDZ=-6Y3mK z)p<6OT*K3}Jk7kgUcp$CXF>2K_72{*6|P>dbPbP~vJ{tyaWd9UW7qvCNEz$bM*jKv5X44jugr+iq_N$-)FN+eZ4UULM14uI^AtSVh~0 zG|NoS3Eo2CEo|?DuF_CCLl#geqn;X%?gm%Rr)5juSs%zW8RyYZmt#uD!KiJTS=d>5 z?DMl6rOoPU9^kQxUNzX`8MR&=r{z*QI;EPNU;TRb6A&FgI(I{ZDIj@`N_EvFu`>N0 zKT?LjjQC=CIhUjV8%&HY@kW%Eol99}};L5DF_Y-YKPm-VFWlN9bOU5VbZhL9F zJ}hx0v69V+kS}cQQFAFZCgU~t1L@)a?Nj_~0o$y>{8hP}dvAnVEklCmB$SE`Dmn=- z`z9r;Km{Lqq(^vv`k4XUSe)(H)?$6 
z^QwEZ+EcFAUOXQzHgvCoO>8of9}b7m8R7UB@4@&56MuH%#EN!86Bk#c#uov~V~&_p zykatHE=#oC_Su;a2 zk|NUgUvlBf@wMGwur{%%iq-Xc*S_bM@mjX?=|CBz?18kM;yM)lX@vY=0vAJ=J#xX2AmvIe%TR5+ zgYcv-J~ZwobpL&N5r}COdN(1`^P(`S-SFybdaGI-*+2UH1oFZz8@Yd|3A3|Tc1-i3 zMPol?e*Q$bv3!iZ5!c<6c0_WHX~lL@(amFJj!z)QmxxMZY7;Yzb4bFZdCTZqpo9+R ziepgWYK=RECSQ$OY*rFH#{(UgFUE}JlAt8lRLHboJwk$6-}1Jgd6^j3(wm9e#O_>5 z!A>!E@9mBe)sWgWRw_gBnp`d0p-_FjTJF)~fYdh$bP`Ym&CPy=%C?6(mrU=?bwnz3-B6)f6CE1T9c^=z~YXVKs6E;912AZkR zlC9(eFI)yQj{rQ&nwWki0qdVO^&Ohf-Fj*tHgb+{GfeAh8fiiGO**IENeSVajdF3| z?0nXDp^LuSAm?UlP$`{FQrN5`&4uKzxi<1@#p>wF-ft7q!&sSTHd=@IV0e;?vB(eV zN~GhuE17TBe**87^-zY3k(`-%V|XAh64`Z2JoCi#4%sn6#Zr$1Dafkf1=6549cBkjxDK#M41c3 z4bDppGkWk;zfq>We2Oz8E{(Lml2>kcXpu!HNP#q6`Z27$oN8N z9dr0{C78UPX%JVS-EHs>+WnCBPpk#0nbQ4z`Q7k#Yf!zO{CqU00W96mUN^dTXrl6e zvFzgr;b*(TuqvhqIE@lkl|eD3m0^vUO*QxlB#A=~`cVOJaX!Vi?2P9DG%G&2y;>?s zfhH&zbI2f5x2w>>nTXs$ivLKRpu+V_gZ|`pZjoeWnY?ISQ{fsz+^=fkt5vOC(5R5jDYOqO>;*^+A>l`Ke1HxNEUhiY^Mxkf5nE3MVbPeTHD*F%V zp{q*dMWUx*>7uq6Blr^@Exr|&@fs*&jwog`g=~n)y!0j><(~qx$%f=7)m+uPb+l0P zZB8V#%x>F=HDRK2v=}gG*Ih=3uIg7JNF|~)rDV#%2jDs6vrhkQq6t9}^qXqcA}S(` zbM9@0g$BhUbN`zIACruVbl>bY#0{GtEBS!^G0l+u--ar`BS404Ht~pvQW+y7qBzH` zKXATWgHftzsryVT(l+vIuR2bMq(ivcF>2aon37c&)WKYqh)f6_sRPk^{0xCv^J2T) z*|58f`TH=3b`MLQG|8%qwU)$*3``q&=GR`}i?9qQd%VOujd!T&ogPJ2w_m&h-Bx*2B}wU?VP zHo0Hc;!@e@$`7y>B(=>}WE^Z2vFO=a_pmdl&Sc%q(yVwD8K~PAQ%f%3YUkt`X|E_i zUVweLOtz_G*ZCopW0xtQQ*FG9THztw80d^-{*iBbn&HSx<{?~cwu#o78&-py{)MxY zZb9TWzT8Fh!UWoKBEo0;A>(NIr8ARc@8lwGi}9+{kdMa^UKYi360xqmPP*;Ets+KE zcPO`M_Cj_yu#|8%?Hnb|%L<(B^SSbH#pV1DJ{OE|T(t;Q|Gs{T>FE(xMnG|Q@|ltu zqOS|ecWB&6W|y~~o^dds`#l|&u?5D~ zIO8J9Q|_KKujXLdf_d$Wt}TM~5=PfvD)R+{M+aw&uKjvnFEEPfcFi{RvC@|GVt|VJPTto9A%~0+sryup3=Q-edXD+ z9d1u6uOcUn7@BK-w6%!JwV8Vvm7H=Nt7(X18BxiPme538@r}Sm&bo`$N6OQ|@J@um zD$b3-VzlN*?JSk#0M$)lsl)V2jv5~tUUy)6fA;3w)=z-xy|9uc_Se>Him^R0O%Y$dykR;1xFvX2Vs)$ZyjXU+7pAFXYa-sF4}8%32ADyX{| zxQK)HfbP&~;0?J={|C2J#|nqU$Zlx^S)qUI@_!R*(@cja;lJRJJg}7zh*LB6ion!K z)x-fY;Uavd&-}+O-ofVOSl?anaqXGewRnZnr1^VQH3+=2l9eKH&8G zS-s&mmhXyHOut`TrH5dTkyXUU&nGOEYh#2zrTSr`>K*2_#GEV0CKps{@ldD(D@%3rZ z{EhMGmHd=V9X|`p*EkSo&RiB)u|@Fe*Akuw()%kGh6a-g$88Lfr>IlHmeale*VTJf z)TC{mJ_k0PQ7x#9!gb)^17$wdJanrnh3;sza`&v|tv@y!-$z|9e2i)Wi4BcU`=7%w ztcp8zVO%i=6y{*r1R!}wh7d??fCwNyGo6YU1={9 zGd~}BWoB2^P3srm(D0ND*?9jQ&23fJTnsQ3T}p*ZTGBaYpAvB-cf6AMlDI%{H2AFb zu6@AcOXDq1-Jj;%$IccJdd5(Ow8UN#?x4c!eq5t=Nmo(Mjq~oAi4OjV>OhN?hONXv z#wgbJF^1J_V?}NWH1bjL*~2OR@*wvm&sWg|TZ-6A z$U$XFp5{0TG!~H-VRLC;6u8{u+0&(neYjD0+oVB#v^CAM+U^pQ4R;Fna8@8LlqhXo zW$o*+q{Xzu{^Ll!{zh-5BO-fN9*i6mZjARS80gW8UYnJw#SXLTmmvz8;@r6c;1XN(B`+NMM zao;GyQ^xK7l&~q-(0k$9?<1uCX8-DlO+F#(In!G)>z$ROt142C>&SL1EOUk$DN+9* z`=n^a;F2fcpJj&|##?I>!`SGrdty6G1e%q`RCm2^w%#K*!a zz2<$!lCs_X)zD=4Uq{pAx_*cJKX&cdw(TE{6FvK=$TVep*3%%b-eW?xgXgv-F-6xSm!LglEzyRCt+o{k>!ZU^LW@s@Y(PS*r#U-)wNTwS@TTXC zcHD-u1^I~hi~KiQSYjcfWj(pX)@2ui7LZ))%g z*UY@+mVUN;PkkYvzG1&2z8E9|QhT(Pl0~LuB!UEIE4^YeZaw%JgC;LT@U>Q8_X12k zAIn<*XxpB_`M*uKi}H5@W%`rv<&WuwB?gKx#gk8l#CPZvsvBtq?qzzuHTb&%z1G| zRBs+Dz7JJkbr{)RR2D2Ox3RwaAVq83g|RDFa6*AH9ZrD4${m#i;$)GSwCTO+b5W;l zB1hH=AcNsPjQrNep-$WDXgJj#;eMHjAbF&bbyZK+r+0|{AbYxyaQcEc)CQ1X!MNsKz4AJWN22E3{S01Ec zbIOLaKtkPJi_G0{x#i6hknb@NE=mbC$tB@jS*${SyAcI+Vylvsidc9M+6ue|K&;kcauE~Q*ZAf(Ar2EzAA3Q% z%4^Kl(me6l!G!4ovM89UI z*yX<(zZZEu9|>3HW7K|#!^Ye~Q(C-|B8HDir?*2tS)rVoo+} zGB4IB!1nMBB)oZXXx2X5R*)BD2DSy@$DdA~b)tl;+LCx9sUBlEq@T5XY&>{0Y< zQ<&fWNcSOA2jQ85f~hZmmF9C3{L-Sd)b!3Oy-bEzc+d8AEZMoWF(O8`IeM1|RG35S z8HLACi>oB(W2tq__p&8-nCj`bElSCsJ{4V4D-YtbvX!!jB;j6_=g^zKOh=N`Zx_j@ z7EmS#%goeQ;t8P@fYxw}@574eT-7#sUuuKF0Q5dG_(4{)W|av1)YeqbZ;NeSvR||a 
z-+kG*C_1+p|MnBOi{aPOs`N=Z4*Q{^2*qxWq&vHY_7uTmsZdFa4rCStvg4%|A!t-R zv?|jKQq9~1Af;w;(hF(4z)>ph@0P~((uVhxBPqTK672yP#Vug>CCAp-gSv_TxQ)|_jAjTSEbvjL9`&Ll7uo+Q~BF<=Nn`@6lqAKdM47pp% z@oX9BM@-~N4as?RM}$=k){y9|y7qsLOEpL8(!`y`jC%6uisz3UWJ|5}r4lVkW>{t| z%6cMe>u2AKUnU3z&k9mw3}r~05xO&Wl{OG280R2WMt^CBt{KBYY9p5w*^Y2`W?ie+ zES8SWrFA@a`A&_R!6J^lBBqb&Bx(;){Oa1?Nr|e)a`7C!CwUTimu}!dtuu0f2(;Z_ee8?ZG+ORu9 zceQO|es6mkI*k#WXC+etx14aKm+a*=;bZz6pDC|cj5rPYGv568g6G&FWmV_%{;e6$ zb_~;_O@0*HegIEW*7obEz8(7Xm$T;a+j?Ji=|ezc!(K&vLCq>XkgtKF2We%KeLmv3 zx9bSkC^yn%APppcb!(OCs^_p~yWFxjoS|SFk$ZQOxE615gtZ2hXnky%M;VD)@4e5H zmga?KSl~uFGwTD2L!gMB;j|Q(sWf>Irb6HiHANX22fI=o>=GC!aeNYc`(v}Inf`^c zFAa1aQW@Viu`?W_eAL9ZCig!M=x^A@TbJljk^?mc&8J_%)6O7XRr zc|jnj793opg7W8SIZrDp7L&28nj#K@a9d+D&-boxxK>mTv#V?!PBU^M<0w>CY^FHa zPjS0m-*r(&xn}3v%jLKy3-z?4MphqJNvQ)h7O1eC)EIGkj!0C;O>xedaFzqpGWZDB z^$(&s{)nE;6po*}O~q*i!H8VdyhRki|3)$w&+&NZ7pf=6R9D#YRQEnT^T3!v)U)KO z@l=iSFruY6rJ+}D&Jby3+Nn?e!z}J#oD1znrk5=xEBx$HV$qU~HDY?E_d}X>!-|GT zrLqFzG+7LZ0#T3SGm#%I*eH}1WEz_U1OhQ;BBv(N6=j`xO}Xm?bKx53Y$)viJr zKFboh2Zi$(={ZSDcBQ!60>1eRqTaUB!IeS!*K7EMkXnO{QV!JzU8lLeXYnad1Kw#8HX!(`wboX zq`4B1a%2VYQ+F2KwrY5`s3(lp{_sc3m6C%iG7M;}Y}faH8*=yNY@l@N+%>nHsI5$5 zQ;#4LUwXEg@I=aB{};TscO$h`p0T57>#c(B+7~QH(N^ITi~>(F73ioAx?j~o2OWGGwo*y>yI%M)#`y3opXIpaE=`tw?Sdj(Bderk5$X|5bX`Xbog{8P@C; zgI-(?2M^Bd)Syfsu!amB|=-82NrhbOg!)pVDXWi_FuS|%I$g<>|GjqA(aPqU9zHa zGp+s$gFj)6DRg$7n9~}&MOqiH6UTfkizZG=wUdyyqKHEzPEK>0v>=q%BjKZETFV^b zN~{%Qba8R?j7QeU#zvmt*a`z?5&YjuG>uLd45sD&gbx0a4Yq9eaSoaSHd$$u&>A0L zt=ST52Tk4L{|1ERC=I%PTGBel%W~_>S=LnH-}(H}_=L$YyGusvBde+1tLM(gAw!GC z=o^qLucWbrAPnf~v+pwAh6{5bxX zzwwu2B%vbm!sLwr*{7kzvIZi&ih7YM8-CGocU@f#DT+Tl(?!+O?was|V*e~TeCp|k zXgPoCSO`iPhKWF7v)GyA^wyl$GVGd%IAzKIj2ZlF{=Ubz{7~Vai;i{j!vC=9|5B6z zP3@|Iwm)=~dtvzIC#NnIvYe^(y1YkEvksHV2r<`FN?pCQbA$}z;|`gBIO>V~ zPlhd7-NL$G-6gJLJOH+yC{k4Hn51o;;jIAp_ndHz`IS&NvfM`Jurhnbt|&07SISfW z*Gm^ew1NhTIopfKtt+PW6(7x2k28Xi_NDnh#+>@BA4pZfG*Up)q7iv{Nl{{~S0 zY<{ZR`L&&Cj*V8)awvvrtRUKAYISyBvwT2Ojn_PnjNVuV(B)~A+po=(oY|2~v`@(& zE-F-ao8r6q-{NbW?%}wluZO-#;I)8Lf?gVh6rmuh_^*(&R-vef7c*YtS41)aGtBD( zF8=U;yk8B;IYU=wM+bss6Q@4Ce3KIH;lJ2sKhx0UWL1r+q=DAwiYKFTCBY4EExrK?Wu;JXRCm{A* zW;4gY1fv3Q|L9wTj?+a!MI+`FIKgFbfBew9Lpr(gB` zlJbuMu1_LvR1ZsO+~-gj?%^)&Y*2VsWhK2L)?vj@m@$IYGmy2>@25USgERDz40 zm3?0!wB)z4l_wXBDD$wg4ZEp?E<^uFkiW!YOoTm!lALDLKFpO|E7plz;Xd~PBL?za z4<6qLRg&FCH)vE|>a@l|5)UP*6E2waUAkSYKJE;^c?K;BO^bJtsy!H(o8~rYNUETR z!wAikQpMpEI3^dmZqVCt)w;W9yh^`AOYMXA1sB@ zITf_IcnZkxgs)50l-TkphDl>#Aq&E*s4$raCfX8Wom(8xE|fTdigI$b=>ECxxn)m1 z`%F9ip$djHc-QZo964>WRNq@$u5|y^)n8MTFj^t#O!}JJxkvMdi+Ik7a#|jrd;PcN zx2kU;s)V3qs;3upmMmxgHA#Wr7D8)inE)aESef}lXAj1sT_(1T4sq(v7)%TLLfC@XQ%=uxhOoBk7tZ^Oz*S1*7 zu1~)CAl(DEAiA^&rN-%s=I#~*CH|U*3It(}O#$ZQ>cQyI z{BL!#9fgMG-!oXoHF`Eq1CLhYzjaO1)IZQ^O_swmz;Y90=PEadGtgZ-b_!2)Sr|HD z`^?5nDc{@-DmM;59i{vZ_?2ju51YmKl>UmRUz+z&@tig6VRE)5^S?^Nl~xVwk=&ZZ z8Y4Wgv3h(@t+j=_7@lNrOEXx4W(JtiOml#zKHslMLZ}jmz!UYNG*a`T<66q9XS-^I zQ*(C=q=Y@aGUFIWriPxc8?wceW?nK zS@b(*hi)=GblwK*@ImJGu7TeGT;a6870rSOJ@r(M748Vl`W60kyOBI#lNU6m{S62- z#A;40*CtT;NSByTh@<<_7%zuea>9FjJPv~6;0Vt~xeTBA z5P#Hzr%g|6sbPRo+8`5rx3NGwvJf&r^~E*j>STjj+VF9g6xXbgZ`q$3p`zwDpp`@a zG2?zj@;^qZMY)QkG)JBZwzQ0CCc}6rXNqE0k+5*Q(Z=Y5R6Ny=#^po{tu>uhEmv`f z;rAOi0xQf`P8XUm`h?|=FpL+d!^q!wI|^+C^<`#bOZ_qUy~0|&Z+Hjl)A0<9dc!y^ zKay?js(JszI#kMP@*v?TSTVzlq8xC>_l35g&`n7!mxKH{;k3UMvF(`lJ(Cg<*9h%q zSVcw|)7gKZ_Fm1u27qHpZI^7C;t=44Hp1P$Qy3p7wX0&Amk#02!|j*gJO#Uv1C9PH z4ZHTRir|zxRaEMCK7|~H+20=DAS84I=327z6tSOJ^4x zpfxTPDTk6^ZMXmmV>pDZAAOd+I!MNwbFATfO z`wmr!A^j%`;V3RyNV5%&GQRdR-TgmF-9unFsG0Zfb}dP3EL3Dmf7bbg=Uw`(t@i=N 
zE`!vv1f`fitaX18g#=Ib;fJ&Yg&n88L*Q8~p5k2MjXdEH0?i?(A9Xkrnkp!4qHD}M z`oP4o%*A5SRY!wqnl>^1t1CO%T(AdSFM( zKN-r4HR$}?({U(d8emiCDvuQP7AGH90-N>Ajw)xL))3`Fdm3Qh!6zaHA$O21!lksC zir75e!Hv6Gh?#Atjun?!^gq$iW>d&9$_)==vR~-Z5ZkK>INgvD9k})s2J4Kz1i-Szr&U+$4#XI)K_;K zRDFfXMIw*CkQd9tLj3yFAtHBh?rzn0xHLCK-r5wW(5;ra*!(EKqAK3duOK}}uiHM& zgx}l1;kDF>TDcS}-n#fErTe!-EzAXL&soOdi~eZ)v9)5<@I0Th?cdD%H(bUZHWN69d(nFH%O+L}(tre3N1HbR8K{xukC8<#LG<=~1+c z^+0iMuA#Od{aJAu7v(cu)=0BI*=4VwV_$jCZ0hS$eJ|z0zqzd)LGCp*_bKUbSr*zj zb@uKuFS_1<_~a8fcwy-`V2yy~-RGxW#ucrA&Vh4*!ErrBVIll%tH#-VJ2^+_Fjb{C zaF^W2+y3>3%Y_6zOv^WPrLVYy=L8%!-Xbj#nzv$0dn9|NocsnHVkeiHHy1OV;p%@4 zyT5$lY{2k?ezGl5rnVW*M?7G>*H~Uyky-CMii^ql4Nz;Pq1iNdVD8FwHOotScl)F8 zj4dd4WO(W~pll5bYx;8p`^L1daPS_@fVp8^rx0`h*#3ZAPKm2>E9X&|O zJ{`X_G)%hZ-Bvw}jB@_}f18wbFjn}S;c2_W&fBLoG0*&2jh-tw=Oo9qojv&*;!*?C z!e9*#M@L#1TXZL*cSJBufT0AFcZHS`C3ij!HPyUlYnBU9F|C@0w43_C2R^G63v)S= z?aK%O07|BEQGmtsapd7@36iY$9`Z!|pC{ggJRT7D*^ePy_qV zd@04N47$D~d8D{DvH8o8{{}3XX-$f;Ai9^&K`7W7n^S+4*JFbtwDqM0!eO@-gyRd< zKR;>caBZc(w8&W+WjDy|LL%PkzV?rfLo<9`*YK#@k#S?%F|bVuAB1B#@3@X>H(SSa zA{#A8FtTM)l74dC>u??JxdnUcDvyeUUvq0h%D%$SUP>`4>Hh=5EFx6o^w7KYs~eQB zlQL*d`B-4%65s73tZtWp0Du4pV0^&3#Bd$6BEDP4hfNugVkkHa>Qy_w;sqUmEu_C5zc3q3^2#DY8$N zKey@GfDg3erp?$lDUHu~A~Gtw{VhBUKnSn|_^53lp&v`NZbD^aM}(2=ah01Vg)`P-AYim78UR|kz=_b8Uhd*l3hSlwKc{a5eS3XHlBdeKQP z2oN#{O-Sw9)PJ!oJZRWuDvf_CBg||>mFfzs@^?M!yf}J4mKBUeYZA}y!7up@nvx^v zoMfg-P)HE!{1Yra%U)%BU|%0QA*;09iiwUI2|8`>mlyum>ELtGq=m~F9$|Q7H9p#F z$xDk#F#6|-VF)@y3cxJ3X!0qgup8R}adf1-m#S&y?<3g>FIRC8T%4$MMl_h+zhn4R zpOG)RD4*Deg4AKZnJeVnEoj=lN1eti)Wi2sC4fy8)jKVnJzn3%6qz43k6 zC)BD{MqgyU1PyNSRc?rh+S^v%)V5V(CJLTNQZQA%H`NJIBnRcJfCXI(Fz2{Xt;7Z8 z@U0jVXNw^pY$n>#Z8gmGV~Qt=(aV0XNI{hu-*6Cb5vRSy2oiE^tKPCezP&B;+!qd~ zgD@wR7R`$!isW`l@|nKL(FjAQ1(6t}&qtEzjS&+mvam1(>squ`{v`jN`)wDNg`!a7 z%zNP4`??)c3P_*bLX4?sls$3NzPj+6vv;tf%lnuu*PusJ70i}4Q4QE2+;Z0~ibGhT zRY`IXo7}G_oBE1uCO7_kl&uh6iOSo97_{g66{#x}md&SGq2Q7k5nqp;pG+8I5%84t z{jePIw}&EKpJeyTQXaCQ*~aj8Bk!Xx!zJy%%%tnff|Bp5GjtPlbzZrOgsN z%)cHN?8Q~dS@R9mXVP>fgTjsJwRKLAO6%KL2X$S!+hkaxpPsq^bIfddHfay4uWLe4 zOGYW)-L_$Uk=kA#^qM$S(JcW=?WyjB6Z-035?Nn?GAR8qC@7N%8QTTgtqWqv0zyd% zF#Eg0d#-c~EcCOl5r>6X#%KY3XhIj5NR~Qtf#!XDALT7{_`cB-d;O8YOwtKVjBrKw z>@?H=Xa+fZ2|om*Dc`IV6)+6AH*71zSgXUxI4z&XQjrCg*XD}{r0#}V-Uecu^ag`~ zaFmfKHx>e18i^_#RInu^Bx!uWuEYzev_^C1`xpov0{{VCTTLUd?&WOdUwEnz!KN@I{WXb4+ZeYzg0If=93R~0=uTnnBg zBisp)N*w!RSBF15h0|d#t`>~{r~l;arIWQ%1^-Q5Wy>*&38$wlgagPf$@{J9F6h=Q z$T&;eNAXpq`whq8L>8=679tLeAl`6T*8$rr4xe6JA2iBE=^Ok9Q&koGEGVWQvZWp- zWtI}Pg=Pjl6QjMlEn4);VlWHr77n4ffn|V3gPX#S!Mr+GYIu%yr&y9RlAxcS404qu zc@!oMDAytYIWHYJonIOveO#AP&h|8yCv(pG*QGh{9lg26(P?-762Yr)VrV-|3}anz zG+4uXOg0<>{TH;?fJNDoNEoF5*1ndsj=yS$q%Ik`PSJs#Y)7yn2*W`;EP!7wM~r>k z+~qTA+yo^8M`ZgoMtQEFheGx|L4|#`Gg5@x$kZ|hTvR9hHw@SAh_c)4#JDc6sO&uC z;jEA1u-E;^Fflx$x3JSM?7w&cW$(m)B&X|{9sR({RbglDpEOW43uclG%9Mgmufi;6 zeMg`F+}`rn+Q}W}BAOw+^Ytp-kosC5(37gDNtpKlkKw*Id-UJ>#H+v_beh%AU@^NF z*LM6uZC1egNNrt&2{ap%RuTsSFv2G3!|F7n5rh+gK)oP4?;n{R;j^rKFpbecGeqKc zFvW<8&@ZX;kOB&1T1bVe)Q1S{;r3nYlL(lh4eTE=!g67nS56?d%ldIX`0KkFCVh6V zN5^-SNJyz1Qx8UgkA|YyEqQ2-0`Y1N@84XGCHz$ri1NzrS0WG$ZvFJb|JdsTC3RAn z#5M{5Mo{{Hc{>UzA79fs_Zwt^-Z9W1!Re<1L^d?mAZ}G5^yg5mKqD9s zzVqz0l)qoPKq-QugGIOwHM;AK+Rp|LIu;Id%hM%7$KgXc3QH+T%JZbV>1QGr+B7Vw z!s2CIGHdYWC48H|7NcQ_1ZQol$f_NyUj(t}4@Yl(q*lIKvQ5vPxne$9iaV`rr`*S% zYyg|TS7_*jZS@ol>I3Vqz^<*F;Nrp~#> zK5Qd#glQ@CWLWH|5mbbhCiofdP18F1*j^#_OCi!~Ai^txc`eUHJqj07zho+RUxcF^ zU477a^!dOg5JNeKsAx>1+JDTx=ki=dAN!K~_br)rXH!U%Twt^_c|KPFh&UqZkt~EL zDPa0hp#cNbEW()SgQ&zurwrRl%BleZbXzfkLrO<@pDa`Ph2St(VId@8ou-Y$<;!7% 
z8n9q0=3U)e*|i(DrT6gV8-RUBUn=;ZY&>s;ZCF)sbwx8D6)%WI(hDAdrWK)6|44sq zXU!0Q6;TbVr7ik?w&c8bl~y_&0)ZWH9$JyF5y?RK(35XTyhJ0MDrGPfJF_!|MGFRg zC?)S)CvWK%vyHGv?E??V;twbz0JRuG$K+gT$iCCn8mmZ1*0u$k`ZG18VIVc~V`--f ztS5rK!n;QQDaG*qTlFmg_vO^9-+=D7Ndl6N+G}vBH`AyP4H&i6*+tYO7%gn-HTALf z_v!E8+m9)oKF=e}>H-A~xb_NyVp5m51;O1*k$ZU+9G`LvhD*x^_(ngKNPAVq&=6^2 zF&i7ZuNqu5dAkfdNY;Zix-ltT1Zwx}aE9l_IiT~BBW?(a{>XiFUgBJ$k8BEGycx66 zdT;~5B8fzB4|*pmI%s>Xj5&I%Acu=2YtO7#{ z7&zQ~lR;>GAmCj&4}6-rZNdgS^oQOK9ZLn100a`JiiGQ6`=xqJw|80Fo;5gy83kns z9h4$ICRZt(MM*ql9C{osT%E3MsMm^|X~zK|Nh{t7FhKO6ICSDLj1Dq^9YQv3>DIhR z`*oO}3fzBQAJnFt_rU|G5#}=701lx2dJn`wmxq-f!B*QNEcpEh-`Y?cZ8&>Y?|PIYdQ@G%uoCW37yfBU?^%%MKi3_9aw8Qr~RT?aekKtPVkd;H{ZU`QL!` z>eqWuh(E64@P0gr!*h0v4Y&jjV8B}L!c8C`O2b*i@o~ltJZiA;`9&=fmBTMA z3y#YsMUPAC!j;lEKMfv%>+@5=6dTsKQehLpQD0*tg+fWuV4pRvBk+}V3A?E$E<*vj z5@xWL*k>kecG_xw+9HU)R2)NiXhRiK%tcpCmZYmYy~Q+~)Gt=#KN)@lvSdKc9Hq&8 z<@Nn6d0wPM6~(#O%R7EXWWa;6n8I2EhiVFW4i@{qeAuJv%JMp+9=!iD(m;!&t2T0( z_6OF{S!dB0w`(KlAq`{taG3HGqaDa~(Je>G_UzJPN=E`Gpn&Qmg$Ws}V9Ezm`6E2@ zlfgVkio&;-!l0x(0UQshqB`L)c}MH0^J#>TYVjfBex1``r`v~TesRfmLA=Q&!KusL zR4oU;DJ7oLc$%E=enjVnVsd<#ACmjzy#f#fWhJ>Ix(Rh|vB96pH{j2#iV|ndC6WmH`wRto-mh^g=^tMKaFjnQ4@>mh!^Zft`uUM_ivOd$dPtG@d-Qd}9br1n>a)*R!zpEbdnbNVL_{*< zDENXv290Y0sr~D6920KB3E~eT4!YO-cJIe)KZ8hT=={4_=i!a+s=?wkJeZ53&FE~5 zUxOK32BdVJR7MKU(*AyS7VkgR5m`lSpcV+U2=Ig`xAu1z;gYqB6M@TUA_SqhdC&W;jF!D@%0+FIlUyc%=7!pbmfbVA>x7ZK``*zIIgDpeiQ?AhtAX-QSf}teDp@swihl{4Ce;82xr!8e`qcfaZCSHl4SJ+ zdy2`fQ|0g8+-s##NColc3J%xA_J-PL{T~n2b;de3pOZXhiQXIlZLccLh~c(_DZrDFb$0^&U-ulUa)*-{sXFJOKS2`YZo zz3_M#>7Qq`0%+fbZmTQ7WRN1`KAUg{K6gMY1IUG23o%svo2#Fy1e#FJKz1hHBGdTp za{h*k_g%kbE5TJu_W|T4Dp&-Ciy&3J%3HYL>Os2+ha{v^blDrQhh!nS_({Ofv{CHU zVau3C?(5E-!?pKSbgpA~AXj{6I6Mhj@A5eocruDC-?1B5e8~g3WDme-0osQ4m_BTV zxEc^epabVh^GV715B~sZSZ~7pOKnJJu%$5J0vVBg(-EYRw=?i;Dey^RuPfM-fm6G9 z9jK7dhE!Mh3dRlm^Gf$CSk=1H{pJIECOt%R%zh%kiMa^ebS2thJPNMZgc@VbcWym= zWyiVLh6h!d@2R4uiUHVg4J;BGOX9hM!T%oLmEw@656^(l3^2$x1|^n%x{qonE%lO3JKGm zCG(rPLyHi$0HP0}eE|ci^E1t#;F;bhoN_2f0f214kiJ*n35~af)i(4oRDCt6OCI%- zB$+sxDK~aqEwsl>%iWLSIb;;KCYh=}f+l8)LI@h~?!^5xE#koVghDAn5JVbD)L??E z@V4AV5<>Ot{5r*|TJ`&P%$?MTsbdZ3vyae-iCO8F)^yB>8#XK7h#53?t?!n?M&x1$ z1i%rfS7FnSnS{IR#}jypOH$*(2AzjAM-9}(Ggf&;6Liw!`A1+O8=O^rajzHHLgLqw zm+;zCPkOS1KqFY0BS#cM5MV{roZ~x*aD(Q|B)&MU$oE>7*vhSN)HO8qvZlm7>Y*)n z1S9gJt_EY2PwDGO!ta;h(tln~t9(72eO}hvS3vC(TeS$F3^ai?&|6xE?9_8SK1&vG zb0a6oRY3Oka1Y?2>s~*?9f|H$FCt@(&jNP(eBaP2iXcXk6<6fvEZ{Rh;~%{R2-j8V zAw3oU$axM-tAKOFh!**;dM6P2c4?WPL6&b0rS{^ZN=$v z8KZZ>#yyi>3y3kMk{Dr+RW?1~1I|_C*3RdKC4(MH$iZ=z?3S51zkyD+qf2&guIAuC zyjD=Za*}kvJ^KXJCtXnjqkZogD?34S>E{8J%dpXSl2N;=1%38??Eh7=l5+TMc#sHqB4v{S=93JrL zN3aXxM>f^A+?jg;^FgNc#n4xOsrZ5GcCMdU^*mBoD@7FmiQzzZL9k#`of+f zoZKYy=LA$ZH1w@WRjr`=*8e$|vChb4GKKaEal$`Y&>ba^T3yCnH638K(q;kS2TVD7 z@LHS5(JH7(%Ue*~NJ4^hZD;DTNMyOh%~`(q`^03YTM_K$8Y_X_mz}xg2mwIK{&KaT zw5)cZ2>t#JhwO;W?g#mgchtT!LJN{hyu9e(l#U`{n7_|Mb8V%wgFD>A^816-P& z7XMg%%iFKPB-21CV+rz9Xd<(9yDkGE`ob@WXM|L4QwR^4H|aN@v>qIIC8B2~6ZBzB zbvGt)?@>*wjm=Nme}9vz_^zH#hv7{Nyh&LaqWg6c{)V(K1RbP~a|E{{~2I#JKEpO!m3N3-*PwT3LN)8xBcE7t#j@#|E`DjE~6PH30PgFuISCa_Ka- zoh~OTh}dNjx{9dtf5a?eS6TV6Lv*q#$_yX;6bjww1rF9c4K5jMWsnGZC(6&*=k@gh zzwDf4FAYoS)sP~#IZH`g{*9j|B36YVzG=y$QQa!J7B$2n2dd!IR-@Y5WW$&+7mVT; z;MQl;i?^jzQ(eS-0y(_G{AKe4r(5@xFjYu1b{oKtbtHCAH|bWzv2yh!YcyrUY!6<77aE{i*3Ggko@9jK12~!68d}{JhBb? 
zUPh5RwxpL{4G3iLz1$s5AQS$duSwC>)S9zBz&*5{c-xSPIN_)S=g90YikuK_(=jw5RS=# zoLvB$=j$d*5sM-OP(6#CI+W!>^8joA20TCftx-VFCxwAUfQLc&bGYe$)hIMT91`Yk z6iEdaLE>f&{g>OWv;VtB0s2#;aQ7`TcZ8Rk=%$yT!CPELqlrCJa1y%^`}*S}$w%^t z5$Hh)6ZE)xUtk^^l5Q&?d?tM6!@%k zy|cK6bD;%gY!mzNu}UiG)-5CupV1vt0ROckqGRAdk>SC4(98NY!>obfiVj-}QKu~l zPL30N9p`|Ay6VjGj2G>ylFZsnYwr7!T3_5Z?J5H7?u8-xL~-uOa_x}uu$!FxnRloy zQ7D##T8rrYqsZbHrnX>4uw8ee8rrNCwisqMNj&G$E7eQR^O@?S)l@5LO!FL5_ zjbz(X?xX}|`1~9~95#Gh*yAifJl2$PDwuvgB_k0{#swyh+gOY~TRn@C%U5B(=GF4p zRjXu{xUR_W=oNewjSAuOp=Ab{g^N*jQk9SK3GQU$r`>ZcCjvQV$|`aK?Kb{Cb^vhCAc%Z>xwE7dTCu@Fu|m3F_a(R&-$OGT@rWav*b6;$(qe z6|We`+a5gOeLz@h;u8xU56)}`8D*Qxsjamrj^;m@`&{*3 zT+bRHHjAwQtbFqd1GpFEAXi!{kfd^q3WO>%p-xxctor%_9Rvl#xzAE1@YAx;_OI2O zEh_t|Ru_u;Kl8yJCEy>>W8igK0P)DiNjghYq246Om9 zt;KPpRv68j^zTQA(-5)-`a z79BF@c`&1XuH05M`~bP~N81&5=rhi|nT_gq5D-U=MfWegI)2vnuD8g5*%~}#50U;= zbVOL!n+Z^Lq!xGkR1OKxvE>m^ik7C}wVo?k(m@Bv2FqD6A=Y#cUh~zAvklr7WW@1g zpVg9QJkY@9>4N=soz|H1O0DUMA6{(c1}9Zs!LPnh!02C7crmgD#^rd6%_uB|WsHmq z=;~KJEM0vCF9(T|J=*8jx6U+20;H?A5ZfwY^wUjMW+j=aw35;6+=`a6nej^9F?LDXS!&c!>BAX^hNh=RmgnK>vg^nNH~! zlne=~Unm=sTpioFQIuz(V9_#QGjK=M@2$XDX1Hp^K52dFb>infe~s+u?Nn)#QiS8` zEpM#u_oW!3SOSt}jDB4Mr<*&+6(S5qUi_1!}}LX5q#yeA zolph8?xJxs^<1`R)QAK`5SDL{38u0h>luxw8v;`0JMaHdr$kjOZ$G63mxxV46cF{w z8G=_uTx%*b@U=@sp3NvjsrViKEYbCecht<@TBX(NgB~6b+gjavs+8lAD~b*FRFJQ z9PB=1U?z233D;<={sUf11w(SI{<`J>5ovGS$(msbyh<<{()pK_==!>k8@?KRHG#!M z;<&~D$^H?HTok36o^h0%o$s7JqgEu&dNF54NsRlZ;u$f+jw@Sw>+6YMT^OBo5#r>M z{Z4epXuMwAul*#GeRf*41Z0bb!RuqH-MQk8I#|v3RB+GmLmCA94|--e)^>!PQa`4K z4!h+=KjKYoRg%X8H{zyLf4q+P^>$d+uBHf4@Y!IWjgbUsLzeDhlX-&})2VAzp({nG(Z%8}t8s$mK>v1%`ZEvCc5xc34eCgy6=ev@TU-HtzK1?ERkK(q9;LtM`ht6;8t9{Enkf(8LyPqpmr zkk6@`Oi4&#Ktk63QP*?#MTZ6cJpA4tIo6vy{;6a&LY$9yvOo_t?4e*a1p_3rq_4VW zavC}jJP*V(yh5du8JM4+v2Mwa1mPq(!%+?65L$179dJ=ngBMttN;T#2l;jT~wze5u z1`$mM?^M^%N``3B5wWji9bkV56YbD0V)B0YHs!wGoio=H6X59^@r(G5e9`7;bL#}y z?__bl&O)-P)iThU2xF85u%uf2@Jbm*`r8vpKu@qt7Zoup|4gkgLLa-;GbBg`{xnxsRPxa-$sCu|qkv zKFwJ7q?3iMZdg>q_TJ}#=6Ud$Huxv$f^c;A5R+`1qs`phfccP6jr ztZnWj)0I&%yZ0sm0vfzi3a(Z1MR$a5n=a&WbB? 
zIUA|pNm~)eARv33j4F@gg;dn{QY9vP74@qUcs|5x8&ipdV|*KHL;(#G^ZDTnz(10N zTOZ&m-^DDf&^oz!*M+?B|C|PpWt(iuzqP*@Ou@k;U->~G+`+)gUEv+VLH!-jG>4pE z`c@XEkk*s?vq@wT{A(3>>T9nC6O8S}njbf0nbk%Ibxx6XN6vRzDr7T-(=eT77q%cw zvBRX5W7nqD#^_X@z8&M}m+G&_Ovn_$Gz)P%PC$A%*xh)kBHRzZ0TF!#;ovbIg~QcQ z8U!vpgQ1SuGFKGJZ}8^{H-;aAz_?*t z@74w)@Pye`YJ8(w;-P@Q;yZ|!&Herv8Ju!uaf0ez&I!%L{HPLQVSu8!c#}+9?_(4x z_NILEI_9&c(0#dBPWHhK><}AO-`6jN-$)5Q!@cmE)niy`Ylsd8!mlM&z1@Sz%@8qL z?smUklw9=J)qB9T<#8B67VGB%}m7#Ii^c_E#$$e1SB#&0?km4BjPm|{vt;}?2_WH`qhI0>s$;V}@lXhZ(R3?U{Y*H0sJQk#i5Eqw z$>>spqb&LPkfz6L<%dvyv7R5ts7v#t#L}uS zl9MSAvcGPRMS&Eslyd2Y7YzvMRFB^A5gF2&uR^K?A71{;?r0kQ2CzB&I6?Sem=&Qs zu>m9*Gfq>v&S7X4+15+uXv!;-KWO?=uGh0bS@0E6y!{BN9DU}<_G0AKHu-+o>_x&@ zoJGZZNSEmw_h3Yx1bs(@n2K)pbDgHqCd+57c)xt>oJVhh&M$9-ddXj_J^cp2@hx2u z={pa~yrEN~U-^MfyO!FGVZsYbe0l+9`gLfez5skTYlZw30$||Zf+gw+^OP?~lKH__ zWS(VfBuM^T(XMQS(kVM5ls)Ri_cdlsZq$1oeVnMezXrXRUL-}RcGkfKw*p3aGF z*r+%}2LYb!G+95^LWN>#h4$T1bMN@z{`dZ$MBe?Iygm=_0%dvxg{2S`w!$P7 zSPOBQnKcJ~37H%otFuhbIQl ziby>TauCceZ?&iMVEP>v=I3YtL8(Y?Iaruu!YA32KXP)_T^Zlj!H(>eW1N87glfaWnu^VFS zAtIN%wgaF^l3+{WL#9P1Jj$m6!brKa@YXo|eptzVJi}VRq~ub)>XplEPYe;eFC0u& z-WfpWxUzCzuWE=2L9g8Lh63*cTbuE{iG*>C%7M{+NU#$0u$o5O3pZrJK1~|!{pC)s zgfcsZn0~T@njhbv+hx7K-96>J%e$PJgxea34#uFBdOHIbL#6o{F7!=q2Vwg7EW*MV z{R+ifJsLUlNivn_hY!A?k{I&>Iwok1&*(B&*4HAe(<}`<1eiA?5ib0p{mZ26?+1sO(rztsHMjty+YvKR9(im)!eZ1Z;)~)#n8bZoDbe zGq9QEq;<|+cDJ{t1Ju#mPY4{>E(yJy~gT3rJ?7z zfa{n5CVxHll+}f^Ww!|w&NiuVj{{?oo5SA#zssE2C*!if!=k5-`4pF?P4v&Gu(M^g z^_guQZdZQNk|g3>>V1p#Lk0dhFoO+~{_C%Xy2`KNst9#*5=h0#-XkCSYI7R|dOtGO zi>K-RYG{`1^%w^022q-J>_k>MdoMv(m1k&=a>xuBt46_E8*$8#WTJg(T!SwA6lX8! z>1TR99g|2grW*jAqZsQkHq^@jlyGx}Cfhq-y?=3HWL|_$gW_2_X zCSPN`ZRH0tw?~pYoqFI|Q9YoJAyQ@sCr+H($G+6yy7iPe(Sxjw2tI|)jMIG1QO;r| zV*f(kPG=#IRGjwJmX?K{qOqQZ>Y9#LfjS4e8QZh5AsU=MYOfyNK{klIfio8l#N6*| z8`XlBlq~h?wDrn6Z3gwwrCzb+hcVfTV*kUpA(>ArqOjfwmQU|P)n%$@UR%IO(k7Id z{01P_7B?2Bu~TiD^SI#hlN{sO2fab`om&#$=+>{CSQD!Bgt!t-=fb1lh5E*8rur`N z>Qbq@on9*J9-2~r_kH??-lF#`c~x`swl8{tW)8X{dO#J3eFCQ=AT5cr?mLj|{geiZ zN4Kk_m^U7+S)rxUTy<3RQgF4zDKmmnDN#P%<);Ut0)D-G=e9Li#j({D)@dH}iH90R z2B0N_`gO03KX$!{9(=JI6eYo`Nn^xJqMuYbH?Y)iQ5fq z>!R;d9XR}$FgJJj^#duv3ok-lB`u7tzEE!5LxWNa(|$yx&~JcT^l(*OunyBa8R2F{ zf*Za$itr~7s!qDMhEL*6`9sEKg)-FyP6)rA_%_@Wp-1&udK1SANqlz zU>Y)#KMPK4w}PXpO!H|GBE$*@^IVH|gsTvAEP*YnP1=Mo&)x-4Kd$~lo4R9Fn? 
zLs!o&>c?!VCiF9o7hpc_s;+7@7iuu1D$-&_Nkf0RR=2Y`;bAqiMDC+36b)t;s0(w; zAy!kZ#8ADcXt}m~NufdjhA}M2$;A!ToyKa{I~`|VEx$P7P7l-aMyv|{qQ`EplC3R; zLp*FC27yu|I2}p1OAH{5$;RUsU*q?UPH%%9`NH(fP~u|3@O93ik_&ZSP8kx``4wBM zNq!MEXA1NhsXXAc`I2nbB?$1nDg1NMA*;=P(A7!JY7d*>BOXXj(;izDZmeZcA72kP zM4qo5ZXkq(BeO640*{kvazy!go zrm-$yB~_W0d`GbGAopPR3I)2 zQ&3T%MOl*MzynimAth4LK5q^e5GNIPlS6Pndh-LEx3_BbD;-Y3x5ZF6=vqxO8qU(3 zBt1#m3XYfAURau?j5mHdnnyRWN{<6xMj!Xr zvQiZ1(l+1s^Qn4YGF=+t9(GB}9&szG5(QRmq3U=736j*ANbMMp$4262e{L^RiIx|Y z))i+EuHGZykFTBHM$qgDCWYf_?w51J7#C8a)l`L5Q)9V@b`H026@>bzVb1Dww;lvm zxuK|*WkIwNt?#@5KVLq`r!2Ev?>vx##I3wGBF~Jgi4%b)IU!qZUh7!8N*mxEdUs73SnyPIoJK ze178gE2Vn|F2}stXkQdBFz+o+hNmr1{V`A!P8VD9&>?|6(%^$;9&xj#(-~70#vFT0 zRC2t*)lbU6;r(4*+>OW>u2W(>U%o{B6MM>3&lg`E2^-rkH1F~vrIR!;urj)dm|m;o zGadL~FYPS?)GEWt$I7mlv&?Ez?P>w2JyJSse&M;((Q6W$3$N{CqvoDFP#CZj)Y%pJv>~*%QXbuIf__ z9IyV`%l{gC6U+ zs@kRWCL9;RP`G|PnUr~P8n+t8+#on?b~}~~$I!5QC^V#U`s)7+=m8i0#V{^DNz`s5 zHB`GpRg`CU+M-`(LMnPiZsZw$tDWG${0^D4NtgnljVB?l2oZSA1}AA6J1TUojrNSB zfuKM!qP9F#3s}tF9ZU3o98pw$O5U%t-UC&MEuaY0!p#|RpgBZBF>&cl6J&79jc;m) zkeAGQ+_hfLWPF-bWy@gz#4D^nFZL>9_xt#0`pinCq0kM#9d|PJfJ!c`&s0C81PUn} zi4^V1F^JR;mP55Hr7vSgpocbHOZ#gmw31hA+VxLT6}hsu@Xak5MR`Wj=wb@wjuY43 zMh4Mvu})ic(K=(uu!y<#0B?XH`G2ufAHUzjN04G5v5cs6AHrxxO&-_Wio6Ll@i5!N z`gK6}R`0clcIAp){UMQSuvV|i{{Y|&cADiMcLW1hZg}Lq zC;=i;8&M&D_z{D=kcAEHDMyS-VbEw|1Tjs1`uO@K+a~K%Cs`r0s{C28;1u7lI@Lyy P1Pyb&wU*!g`hWk~L!z&R literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/results.jpeg b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/media/results.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..88e739e0c59f11ffdc15e4622ba996775519a8ef GIT binary patch literal 118242 zcmeFYXIN9s)-b#S0V$yeLMYocH<8`R;Q)-}U`?uV-d6nX=ZJS#4(RN%-~b*Gqt2M?+f!fIt8M z0%qXX3J?L1gXt9cDGG{HRMcRmqlLj}=~(C)z|6wKe1;i(csThu5#XN;Qa}JHa~>^; zKCh>yrgzc$VsLOZQ$mWL7HJMHO0x&aWQcy_>1E=WAR zptUpL;5q$qWU|v&QBF}<{!PZ%X|*?zKdLWc1a4hC>)_?ia?S5?-PapIE6JT#x83~; ziG*izy+XOemHE@YpZ)AFYD!n@5mAb{%hlbt85A7!$>XC7W;22e{@{QSTOTPY1ijh~ zkQ3!CpCE0GE3>Y0i9Tl_QLE0vd2KVGu={5Q;-)SBUfN#VcWxC?%=h67`lrbMIA6PM zKIuO7f|IwmK1+4-7G=4$#IK+Md2DC~o_<`(ZtAS7cgg))0__@|?lhCKJ_HT@Z+<+#36M^y;TW{kx#1DNKcW?%^Qsrts7OGE zq;+%NC0%it%?Z04Uw>X86CokJlviN_|85bj%H>(J$Cb0)8>M;q^qjK5SJGHWNqK54R%lQ`Gl)Wzs<<@8H1f7VA zx-m??)9&++(YZaU>@IDRf4tBu4#c2<5$UCQw(;6LWni*m(b+u>`0y$I*>#D}z$xwp zeEXua{dLG0r@V*T{_I6AH$2)DCPNSyfejv)vivBQoRKe`KQu3TXz zFIT*OYjZqkFJbBWg6n6^8qf)Ow5LcLEnCzG%<=y_BM&w9hKiIe8~)5@Gmxzg5I{Jk z#WJIwMTF^di{pQgT~k;@{#|DJ9GmqUfR_sZwl?Qkm$PpA1%~`hnt$m7DtP7ujyN-A zm=!}bce9|Pq5SBU*}#097iY4rV;`z&WD0tL%`JiIjOp-%w_WLdjhA+^u2?y>^=iEs zs55W&9}c(O@%@ygE9lkeDvYe#=f&ht_SWo+)K^n1L}G>mf_mdwsP=a8(lu4<@8!5W zQ6T9g_eFwAC1PKnKdk4Q!~{|#^=^SQJ2hG0;pIG|vbL5Xm1axxw!vt)LB3K$>uO_K zN5Swze`4O9tDiUnum|_o#tbqaW9kI;+XFKjK9m;jzSwqkm~$~7)q9XT_VTIFm$Y(& zNl~ULWdJalaGyggPk0>Fqft|Ry8ytAX~Q7`*zDzK)C6c9-v`O-TOTwi_5|Gw;oi1y zp>!qYOFnrj3gykNvk-GXy_ViU>$ul_Q}@}NG-`B#zKEgWYGK=}=JzB_c$rc>seZ0; zCsZ)W%M7n_50Ot3;KYsbw?<^J&F?&O3#Hu1yV@J_<|>grSqTW25vUBFMeS|wpOWy5 z3&-VH+PI#&A#V`p(OjHykoSDYEkt#^mQBWY{B@B*3+13|%Gey{OCEW;pe3j#i&2Ng zZBv@Na<%A&SE?`J)<%@B?bj~aCyPD6r`FJVlwY)(#*BfUVi?f#?$#wdv#$E5u zmtKN-n=D=JpZSv9jHov3UMR$^??~GgUK*5D+I}VNoVwcpiSa%v9`}}WbOSEubDHIN zwe%)6!~8E#a!OqpuTKPE{toytub@cYtH2GyhlQkz+RGEOrzECd?nqY-F+CVvpf#y| zqOcmkc4y}`aAV|f(dnyax59`46U1(BtY!~mqA#zU-Qd;mvlCzVs|9^= z5y%w)Bt$D2_azskpgo(?AKKpJ*Hu5xW`|pa&DA+?iDFNu`PbXF6Qr3+NBCTaamve+kPrm=9h?;H<#gZ zCw9ndr;u{`ieKAfMgV{e@tMPTJhbtmv_QdE9|bT*Q6flV)+?s7Olc~wk>)%gjCg9E 
[base85-encoded GIT binary patch data omitted]
z>?M6~)Q@(>1`;OswF0Q|u=Dz>bhN{uE%!nFRh;NBx`_0@fC%PD^3Xf;jEEUrUGw+$ zWARdP)mZA0r657Qm&P6URU@^}E(ejHuexr>t?V0dnfzl#uObzkBn4ku{k&ncLq;im zv<9~Rc{zdJ@!@^)sb$1ZKR}eQIWFICqrt_uqGT_bqMJ@?QDHTL_~Cy<)PKd%nlfO? zeV>iz!!0(z_6ZW9UyJzUGL1f}YftlXT&prq(gd4O*&r?vUw8s_0HWskF5H8sd?JxA zAO@F6w-QBTpJAlC$@QkWAAL^7Grok+0+4#3ShQkLNC1G*!2$B6p3FS&@&k?aEi^Zm zQd&hk&oeJT6%I!*&KVA0dkHn~sgbu50(F*B5^ZBMbv%u#Yh2j6kiLW-KcfS$LO-&o zRnWR=3V%KZiAF;R00Rrb2V-xN(5QiU?DHGCeI*=UB{iF0U*>YCq6R~=()!Xryq4G+je&6Fl31ZW-ietq7lj;|>l!tfMhmA9ZeC?a_uqE)9 z?fkI&)@I_1EghM;->RC~@N!s?S1z+C(eo})$VJ@-zFRHQRJ34;la zUQO9Elc!n`^J-HDL4b66n9Bg&V)Hp_GNzTl?1+@;Z=;fkKZ}>cm3GiNC?$h z_GyUU#&gmvk!K&|O?At!zG}v+U_=r&qtSJoU;dDeY9pGqd<>$vk_^l?e~P{;_LKi< z;fM7QN7RAY+=IxvBWqc2rz2_t-d&hu*z3pt!*&4{Yn_x(m-?93$pOYE)^ZSQN5?)hnQ7&=rflnB(@cH>VVITSiZ}pB| zPGH7qVqJg8w9bE%)Fa>jW4yWm#LlHj2r;T5tqiu}O<3c6Dm%s>lJX_ZnUuz6q{S9|9c30@%Ft}BV~C7e^&HSWLa+3>Y{_moVs|3JP8!0?x*J*r z>g9&LDA!aCT!51{w$_?g?NL-5D!JC6;y%`y#wN}={%#t60A~2Yn2MoO({N$^I;#;X2N8H1 zZC#3U;e8e7&~}+uoJ>^C@C&3kI+TuHzq+VejY*PHOMIR|xaN7@&{re=@LdK~@t{D$ zm5lzUI^rOW9bi(bw13lKLGO1PT<=;d1Yzd*SK@F82SJHWl;S>mcAmybSe+9z9;ey# zI6rl|4z|p{3s!6UQ8N~8#$(cOtIceFEgGjblRC>=m4^tqW=*ue4wt4XkN=;Z!M_JY zIRo4jBUu_1sm^?Zt`mA>d@@GZY+vG2jS7nwnh_ z$*f!Q+Bg=PrpbkUkM}80&x%KUwkc4Hi@dev9t{lJl?I|kh{@>5%rJ=G2)o!Cbvfz+ zYdxh0-%dp{reSA^1_`|xg^rH2F>P0IqrjQtqh8`OduJA!MJDMj1UcNcLJ-C9^0;U7 zF7N5!=rY8qQ7|6e@8Hc1bo^#^TnM#t3AdzkpT;697{w8zpeYN}P(_U<)X_I^k>&V! z=`l%BG=4cQL|3#-I>YnkiXI6j($B6cE9+2mWe_dTFbRyIq&mIYRc$sSwEBcJ)@;dd z3KQRRN2xGQj_EGwHfyGd0nyxb$=ww4RMVP>8jVO$-AF5SJ~*9gWb5!dYnFe$t7UIQ zJ^mxR?JL}p-7@nL7ch3qIqQ`=`7W(~+$AVmbdGN%W{}h=&8ghHbuho?s_G+Lo6MF| z_OW9Un*~SI5%HQd+A1812a1`*%%d5BQ5q_@eQ1|7=oA3yWXy-QRT&&cPulpU{6wFV z2l!~(8WweZDb{*f3Mq0;sfuIyp^;kegszERG_Pyo4x+c;&}F5~XV7VvEvr&e3H5LA zlYv|wo)Fs`76y?J@;lGD!nR~>^-A(q3824`qnW@FnsEnHvGe1Ej>4~21Z)d&AJanJ zXCR-?O;m;xvu5RF7+fTQmn#s>P>|T_Sd%xa*|Uy>KDu%qZ$_b|+yJAu!0q`m?@ZDU zm$VFqu6#p1b?yxdJObg|F}M>6jE!-(T>HMU!rW6*F(gb&?v86Qs&ey#<|BnB^R$oFMmmOQD{7Entim{Zom%^XRZID;xwBM=4Kpn` z(#ViO7$i?G4>C4jcEa%@ox4aJD=gbQUnZwwa;pVveW5is&-80M>;waZj}}~%=`EeIaccvlTN9-c z@28}$Gu?;{u%dpvu6|tZKAY>)Y6nd?Us34p{(bh8nmGYw zKYBzzBhg%T$Wtp*#Gzuz=eOQs?GIh!q(rEUn;R_vPWTvt@Rl&ugMs-km7^;WjX7B^`$HJL>XG656u^i830Sm%|U9Um{Q~$@G zFo`T$14BxQ-aI9ZNw74eo%*JAFp?IWw`6j!b9mPFpBqzuWGps8mZ_YeujcYlLkRU;MMM|v zl4UH~Rh!YZn0R`$BU8IwYVxgc<(e~}?(twKX8$X>|1^Y1y^9?;vqIL1TFSnUFe8+G z_hkQ-qt2U<;t;QM8ICiu9Ou{r9w1PFjDY=kS)CVx$xgg-u->E@KTZ|6nS4W1Pd!9@6gdxMd-u ziNRbx=p58qKL0aC1c}&YPbKv0Z+b{8fo^7urj|^tcGL2I4@D28mWm3S$^wuQQRJPp z6b&2Fv)Ap%#ar#IP%Sbw`yYG9wrTP%U&>10GJAa-RYQ z4j2Z&8*TaHnQWAXN-s^Tr7XS-{lI`r!h#gQF^`vV26KJjj)GoX#gnJLCgWO~2|FVJ zb0|w6)QWiyN{35RWj*UloM;YFhmR_ReyGeX(slvPfvCO!{F{zU(wmyBF2pIVo%kcj zsI?0JxgjQ;PDh(ccphmsa~qbF>BM>BFBWj<#rGb+X5w&sHtXxI%Bv^su<(5wwWVa< zRBJ~j1!U-~YBSxpi;taePgW`syD!!lW1;skUg{N7=i+t&FC4a1qWxJ?A)) zN+W(oc2njSCcDxa^ph-dg};D!WXSLl25GwU@JT92WKhU=_gbfli*W8KwC)QclT)Ws zt4#I({72N4N+sy4=U)KEe>u}^K;gO$dpg7WWwR%RSv`tyL0kHnUHXv`i<)9ikj#Gu zKO$jw|9Llj^VK~ks!vdnR4Q*VPF>H~(caJktWuo7(#oPULf{lN1cnM=I!yHr#&~yG z*j0edQL+8?mv24AZ&+c&5hi-mcic4Ur=Y4ACP%z-gju>uS}4};WTh`LRkbDGCI7qg z?ncEJ?j{8=q#90Nv~hxD`rY?7KRYW7sCn3L1V!Oo*WR~G~?BDmhr1Cl_o1{`~4u8$%#RXME7Oo83bbV z;f8#1s?qxFTrR9?*YWTs?mwn?dm|N1bI-+5E8NzG$B1_?a=5wz=bzTuap_C#v~N1e z53GU)jHU+;3xu1Gn4ayZUs``G>s0EMdR2_5!t#qRe?7~T0oiQ$9iY?4io08AX50Pi z>zV2BbIKDl-3L3#l}i2Fg1%AK^N}yBhH|}c$I4WZX$3E3RqKrsaC!;FA;E!d?3*@s zH&uyUnW~vCJQS}`GWs6yUU?RXTUo05;a8nl4|^iwUzoQ%O_7@E`dD1|*i0zGRJ1Ne{|O_tC5BB&xA)xeMR5yX$Q446|0$WN`Q|Sk zRT-kBKo~B%l6^f1AEI3S%>?L>7qe^*&h&|ufQ~4uVM75a*h77Jn^mb>t&CQeAP`_?#OG1aaJC;W&r(1bO 
zS@@|qr#E&U#4gnE`|8ck{fU%Og#$&7p(!qh?|e&AZ=qwH7V*(zNk|?!>6{OW%m(Fg zp@V8!w+MkYbZEPVlyAnK}@|Zs0&1_`mgzfo{A_voEQC$5Z zb)iI$w5{7%3S;wJ%1;Lmq%72dAJ^fry-pU`JYt1aut4Vw8pgR5R5Rw6vVcqKyn08z zxNW0IvHgBrhho~y=Id8Q(DbHu%X}5P*CaJw`VpHY3ue=8J&cMlAN!RDiHLAF?FS@R z7iclq%XjkztvJ`2?&U&9Hm5_xA!=?kYH>N>=Gyd&`1TS)k6`%ENWRD(xs|vR z&oqs`@e!v^bPCoQK3{M;N8{=S;xtq$Q>Z8J^c{L8CP;N*gc2;jP=PWL;l;z=a+0u( z2F$U?l-ayqlPZJuFWYDpYs+R{L>TO)zIx@{EWPl>VxB$}JoY`?M$~^M%Y-eKKGV=(KNEb$+rP7}O)o(qugZ*?aI*xD@i2+vxLn9kk#*`;<`9vCD=LBCnb-|kL( zOQe9_nBBfrm&w%QAM7?^IDNl}y3Cu`_;hu=fA&-^WSJ&KiP9dq1VNYcHUn^#NGvZz^|Oxw7X@E++@NefLE zr`eI!sVH1*U`_<|>H6dAVo|vWS9-Xwr8Mobct&snldcy zc14b70V0F&LsFuO{sK63FaT79CIgW9bvH;+re0w!qjF3ucryC6o;_w`PX5z=1rmxGs262ln5y26aY1l$=;EzpQCGgf&b5V&^Qv@3^1XiQKqyvw6kfh& z9gk6SkQfQ-k~O+I%TXY}cvVXvf-xeej(t>0$`mK>~MNYk&lQ88^G zZnXDv1hly>8w<3o7Akb;wBO?CcUmikB61Kv=}||zMuTHHN|(ikyN}T6;W?Y$fQ5H* zXzHs)Ghc*5vH;bTx6Oh$a=r9hauVQ6uIsUF%~qO^TTO1d>LETPqWam~>}ER3{6ERP z+T*=LQ9;D^-+K@3bz(M19%XOq^!EzLU7ii?$51NaIf~?P$Q`q&}ZP2GKxaPI)(+Ka5*xXmZcrXB%2(O?cgm8&UR5 z6brH0HQ#%rjAzRgFNw`dBQG+|*Gn5JUk4UnLlrc4tCX8fEeDQWJ*V5K6ta`NzO#A? z|3~;Snwfa{S-%1spAvGFZ7~<)=2hO6-M;6?dy`GwCmvpMp^*YX?o~Khpp9B` ztLmlX-KkWKqfL3k>lfb>Pw?0|{=?!L5+`}TMJ#{)+4O^)lgG%%f(tgm|qU?qUe)8^=M4f9mk3*pNdF=wPO)_iI?gX%A9`VSS; z1?!kxo*V_8*=*@hO^QBVhU$KYr=Gj7i+h&O{up1ZuTqD=(x8hSA2$qOLmfisiaX^< zzY0@cErMREGgS7#!6_$x$%YK3z~9!v{THF^3Lf?!Q=;(X2v^1Z&`1{%lMcL4^J>GK z^Pk*HY3?f<&=3Dbd5ad|4<3hX z2KHZt5QKFXLdB(Jx)phR=w)g8?msj1A&AY|uQ!#M2yYV@ScFRRvE;J^O~1_OTQjA1 zgJ;U}%XoE{-r`#M?UL*#D^CZ}5FPyQFm8~Hc_!ZHIrrn|&0dHFn)`&|biR*E2fJ?` z$FrcdmW+(AA%h!bv<|Q>;zh(n&n@C~Rz3f-)dF1#}N$* z1Y}%eMEcT!&Olrwux~*^ChJH{{(+avdeTV=(W>{7q z=2lKq2`?ttny+8L7BXiVHhM%MbMfJSwAa72+*8>Gc5GC(XqN<;CD?X6=hboFx}7or z07P3TZc!_{b>$v64#_rox-NwKP#AG!4`>^(NWM-n+?94nSA7#Uw;6QKt+9OqmvG-; zK%uxyJFt;*Or~pXwS}hr5)0diy|2(=i5j*td35siSCJeW%Vr)^|J~Ie0W9zP_DPCr z4$|Io#d%!$oWsP|u%*Mk1-^eMT*%VQ*!B~JL*>yfaRLPw>l+I(!-%9DwU?>_jSQCJ z)5sNYk zc}dyyOKR+Tq+ZhV7?!N$#5Z!ak0Q>GF?5*oVGnvTjdvo;V0}G~32yA(RMWfOAMVQQ zO0G@-k1yVs-`-U{H{n@fUK3nQRUF>!T_bkXtx6@lD<$CVSfeZx9H`F07!va__PIqI zM>ix(`Z<1Xj$V&0;;9j#j9NcCIn4sHMoas^#wbG?@2MRbtB5HVBX!jL=yeOL_d;%3 zzAx4~M}hRl`f9>nKnx+AUuP=wfU_hV?S4=?_Y+kp)rW*F_g4F4_dfKuBcQw3_m`WD z)B8B|f%nD?pNt?1ZLXoLatq9+u3shxMt`d+CC$q@((E`!VNtZHvkkl6d_``bB)W{G3!Lg z(Ew6No&LV)`sm${_7$hn_1N1t@|N@3pGVX_3C-#?;b|ZY^L}HwtFHyjlFML2-ZDhO89tHz0EbL4*D&`9MhyGTpQI(E z*odRDNF#12eFA?Om2qJ2O>IDG)<W+WB;E+DNB9Ru7hpGlG(s2V9QD`xx#9L^^aDf;{*iw>eV>vBY$A9vTDT} zjhoQT++=Ir$?|22*W?-(T{khub->+))p|~P_jrsEM`NHK8>LGvTWC;cl?_g zN1AF^Sgwpt7>Qg=hXz1`s+k{yh@@7=_n--y!~eoc{@2r%-V%5i9E(NxeY~mE#J+N; zHH?n84nX3)rl5yd*p5Vz=0gdySb)wwVKdD@fB|U&z9atn5)ED#1q-~&NGm1dq`1{Z z5~FY-)(cONNGo}V6S1SeR5H!LwPaiPAwaDRg)b>VQQMO_L?x%owKI9t+1Cb>NDsF- z56f=Z=17a9-ARUI^G;8CjBCNfIRT zmAU~=s$@Sq`7D#=L+UK@Lb~FAUMb$gjsPfytTVog>t~P|Vx_XIOf%QJ8?FvPWOWI& zdYkSDx{CMwSEbglm+@U)nzzZlO2t6)N2w89Seal=y=IGoZ)-vksDtx@4YASo#mYJSuq4}Zk z!WFEG)I7pqlIK<`L1mQz2e(?jM5=2IMb(tssMR3n0Nd-1FJ`SPE5|XGp=7dY4&e)T zjq&nOoK)h0>?U6lH>T-Z2*+o`jSyj5G1N83jbKl7b!PGtgKmESq&?fBx|vIn& zb-mM{F?EB=&95Vi94XCh&!uIH&ZRJ0!lPv*bTDX4t`JCz=+g$81lk(^nz@dcsL4ef zc~~`?`nrV~RT~0EP`Z4-WJ+)=suUrnNz~sZ4Q^T?do@wq(zmq;*)CsvoRImS?qJ>_!oduprmc%&C}p{ zIoI(QaP}8K7W7&Vf|Zg)`4<3b?j2SgLZ?=Vr5Th>4Gw|M3P(#;UQ8{yBndI&}}1N*v8?MG`Mds_@;WXZkHZzRKk>YdP)OY~1FgNlWA%Wf&c;-1 z%GL904vTe7Ux^^g&RHBkrLMOAL&>Oe^agBk0x44HZU~@8f81$hFlOldPTrrOzeG7F z`>=J(me8#E;mh=m0I6Lq$>XlM$m0YdIWbNodCr3-%_`A)_lKu^UfO54$lOwzCEOhM zt!rNG+DF>LD0*(67XiJ^1^{mTs5;x7XP+kCn{1ScyY$%Rx{Sf}I)aW3er?dRg5cu}EqmZmIiLcp% zA10Bd;qnoA7{d$($3^h=?iHK~_H3p~wfvGUBCgdYR-r?3J-A{gDB4JE*LEH)JR|wM 
zeAbds6|D1=+!YGTI#}g@q9K^+=QHqyoNv2huXd%r@C@Y7URcfZ9y+W zFFRF_DexcKO*W@ql)=$zU3P}Ny^2{ss8|Ylk3buukCLCnM?W=VqF?x26{k9~_=e|n z3Pa}$rtq$EGx;}dw~L<7cyu3$%_H=u+nsrFa&8n%R$NjeetMsAeY4j?!CYVaY}%L=|yE^ZKurS&tuQ_ep2M!Z$Cku*s^0 zpxTL)=IZkYB49Rapa59H5bn_O+8_BMnx*t4J{MV%@0CM5i|8RVEms`tPMeaaKRW&OV_R^}j=(<{rx#j=r?aJSwP~816#x`h%F?Lxp7<9-|NDXG}Qm9BiH;!@rXv#)551G?*e;gZT8^b*%tx~F$u_w!oMBMRxl`j!<#E?aTy z5Dix%V>X2#2s4kAF44^1?^Ni}E|W0vZjJJq+-ebbR&on&X*(NwXb~1KbFZ!Ov*?RZ z;3GsrX`i}!0alOn2i-%^mk>2DP9l1lm@V$2T7_&!+@;y3yDH2pl#}al5y0 zbw=d;*R!R=j)#iQ9+`(He^YBFwMUPw0AF?%9%dkR$gcowE6o)#w+=h~+?!HYFYqaD z-O9STFf=JzNn0@AAU_wl6vy81F_(d#~M1XooxcO7wB_#u+wR1IJ(MbI1-kmAlF5H1~3lVeLfnY)2SO#XGwDe{M zy4w9m<>3|!dBF)!W+Z(8 z%Y7lt$rae#F5pqQ>@r#1UgJX*7ylFm6oT{eQ{<%+6n4|Z| z2-^I<$qQr3ngZ>a4vR`whMy6lon#VgNfRZJYl3};aq_$Tcx22<#nv^`E%bvV4YRi& z+-sI3uSB~Z77e$+R0Ad4Ds~3S(!Qnn%3OGo0#85(E>U~mybS9SDZ#{^3rM7pY#MkB z;##CjwCAP`^_C8`bA6jV$tf|Q?lkh63o9atRlK?(U3Z3d5MqyW!%sLSzw6)e^Xiz`3QwrT;>FvZ_ge(2Ky&=j=bZQ|YD65&PWJ-Z|e*iNNTj~)AiIOen_eH_;7 zmSQ@@wRQ5^)aCyGjre2U^7pV>Yo@{cST3o>%vTBXt~#^Ho}P9{zKWeTyj=}s*vGt$ z8BYr<9JTpodA`_7Vi7o0zBO1eMRC`~Qf%uqUzS!?CYVTEwg2z_UV?r*%E3H1TlDzT z?~bW9HKGnmIqY8H{`oQ7WWVv0*-F zhwvBAlf(s$$eYiJ(39-eGR$08_mf$Jh1Nk7$k9wl+8W}SUz~Z%AVThUzW_alrZ@kC zbqbq1KS*KC%jZljmfdoN4IfpFwH9jET{q~#U(~zM8%4x7i{dMs4$#GPAY6Jx{Kj(nd~PH(R9G>Ri7Q8GJBt+K(;_0Cj#%tNhdx_SkQ!6Quu z4j6TsjoHk)@XX`m89wR`6x>Mdb?|2FTTH1MPk*YP?b|kimh8YEDme?)pHR}e0Edir zQUMdXu$%)55zO&?reVAOzKSa8o`(Op+$EH?^M=|ncl_Hr?L??`Tdc}ETX2)k_5~7s5JSI-B(E)Ki1y+hD-G0R>_^DDgi+BE&Yx9 z$G^vvI876#74TY&r7eaA=GK2iESR zmhS5lR7N=)h|8sn!T(|&g07p^%_ye(4LoR$e%qsPX*Bk4RjAPVVcdC& zeuhvJ9a-W1%TwXcJ4`f!M+?q0i0#cCE0ERmQcGP?eI(mPxOGc3XZxJQ2kH!)dF~&j z{yx@FQBC3}Inv$`mq_wEKMXl-ab4(9$MwTu?e>@|*b^~ zqHj!797ii8$4ZiIezH+{SV7f#jX~k$*Fx^xB;#xcF(vw7PeRGx;Lky49%M z?Tl#>q_-X)TjE!UdadRLwf>ME2A3h#dQX@~EflF{0`JS@?9v?q7HX;1OPyJF-t8jz zAT|dW>_X;OM4_3`shXPTz4gPM>V=-++}TBs=BwjImE{fN2!dXGVKIzWdL?c1SI||N zJLQOx^h_^vp%*)1o3O3$&xh~Ooy+5O`&jPY^%5ryb0n5s*vp>~AA4+Pg6kqX!`l}h#C9iWhjHe-Bym%^l`(Bqy z)@Nh}`*bN+C5jnNgcQ^Y=Kv%b%s#eFAx~(FmGO8%DI+UrulueTSK5tlR@2F=&bj(% zog(M6Kve#VN6u%yFu)L8`-4;c9^U2UNwE>5Y9^S$ze~nUnYW6$?ENMWp8DBUF@U-C zB!Bb|H#RuX7Ajh=#1GdwHs&|DBmDt}A-amrs*7JHo5R*O0eQG5}dSA5L@uCEb-3!Jg?9D3rlgyJeWYo?*yA zU@O`~Yj>o|oLzR&Mx6jPV95uqa+|h&z18@#BCVA>gO3rKh8O(Gm2&jypvShH{A;b0 zvXm(loBE$M`Lw8;Hhee#TKKjSM~Thpn|3XI+L@V8xL@N~*J5kM4^R^V#4u9g5mtte zKx&~DmLMI2KbJ-#Ln9t|yoCLzI`PKrGc^ZZbMQjTD!+!N%E!8)Fv(ZOn+oMrgH)qJ zqlMtetx(5qy)N68>)|e_z;?lLZ%I!R&9HQ5W|7I6WbpUCIPa(J!Xnfh0q}a+K!ylp zEoi0Hi7#@PNH6AYzUW#LTa1E8nKpAPlh;pw)s~C1N1KYBif-BUzj3$uM6-9c;pPnx?6MDuBH~e1I?)?$k3DVQd!n_;Z0L+fl!6X36O5Iu*R}0UAI>WKDBp)B271R32Pi zs`h1Znzf!l%<_r1f$?nBoP*<2Sj|XT&W3p&ZeDR#dVv<*yY%9X#|60N{aKL}n)Lob zH5&Q;K@A%8?|d3GNRif|pCJr_WtS0mQ*-9ri(n)f|hkzu}wV@PikID%m=Y$fw^~*Lu3#P#IYLpQg4~yljYuLY_ za9QL&Tjf7PnDd=05~-erR@Z>;7Q5%GI&^D&f~HsyfQCnN@!fgPy$r_alts|ww-W?D zFL;BpzVeB24nY7Q;yMheggbYB*&7>h!oVMwW*#@HnuqtGz!<+Y$^u?~wZbU=W2D6c z>D3w|t%V4S=i;jkaqtaB_==?A5wk@r3V!*X4v(|naA{z>)Ti*8ztyFDTQ_sJ;bm?ewpehjn>Mcv6KDFeH}%P`kdq zvwEldjG5L7*_yB5qNDRCpDvddjztQFqhx%?*Et{?B^cC zQ_ZvWIe@R>c+{wn(${1v*!A05`&-tN#Hs`RoSqFXn~a3y6Ryx#F^2)69t|faGb-Z_ zfvQ!idjg&O`z|NMXQS&Gj6*4Mr&FL0PaD46`RlYr4zh^1Q3tkPzTl(Soe`Zalpx+} z?12gDRI2ZdeG`J|DU95g+z=XfRP=tc;jM3&;AN{Z;hch{io_#maQ6((8E`x$8`Hiq zx(I%i-GphOGB%0qqp;3NkP0(N5%7SC7}9(-;ssnyspx(GT%JOM9^%85>fEp8im{Yc zlZ#aiPYAyNctJhf{U%1Qdo>jsQIq*mi-e|LIIa!+*RBUfZZ_Hve@0(c@2YHjm!(@M zO;XwDU~Iq9+C$gm0Zjjk>#$$3HbacNAASdjn0oBSf-Q`gb?>*KPrYN2<|BJ?RC&9< zo-AxLA1}0yOq|;mY_9iZM;m2eAxKCc!hPk@lu)r|OG#KkY>F^c=kjKZYw8SQJZy7{9p~d{s6HIRQogIqw4`0~@ 
zOH8oO)*jf@h46X#A?#fCRCU2YPbVXj|?9f|5!N? zQZ$!@i*l%RhNMV4hD7L!&(*X19#-hYl@tulOe8lQu9f^;V_LVFwXJJD{njnWr^r+E z(BL$NrLA0UEd3;%T;V1?SNat9RmnS8uc8B+o%z-ab- znwUWckT>`(UtMjjaEcb1t3qwf%;Zb9)3+e{?0FcYNDw z?4c`R)lUw>i~GD=>jj_W>`sXu2B?#bymtziRynD_9<+qBA@G);ZFfqo`zjP>P5QMI z=R3e4CZFI9-P4>NXFyLr6I4_Mo$U77kAB>}J5)Udk%$Tr!rN2jFnVac_F0}8W_ zy+l^nd8<+(0}1#*>5K5&)$SKZ&yr^C=*;OVfHfqsaYx|7A>)e=uB8h<`(U0((dns~!1 z+pVoR{@`Cz0pbZ0!FQnlJ0Q5}wQ{{2{N^9HY2!XPDTpIu$_)J6?rYd1s-n@=Pw4pd zZR#IdfQgMu8|O?Ti3;BNl5A33`f!gK!|MSP!9TS~QapUW9k27*{?Q3PbyN=Ou=CTT zu8S(j3iw+>?4pi&oRe)svr~cl*zwW$Uup-Sz`6xM6Jt<4dcOhxkwng{^p5#9#<%c^ zF^H$B=oymWzpBEq_p0{sXogOFdDpz~Yt@3!s(O8K3)KT{2J?vA@|M!#!I4pZ53f|t zFG~qABs?3S4p5U*z1j0N#66&?850D7`_;GxW(sH0$h5T6y~w?kebcy>3={Z}q!UvW zot`WXgI-+8LlB2v!n1|up+5!yaX0Z%cn$H-NwP`oI`xwPc%8h_{7`5?P>G{*l>H)P z91<_|8IoNS`UJ7lbB}nBt`7bDPd9d#)^hHMc1P44E7}MG@Z2$1-@#6{`oCnqpnPBsB?S{*VLc+3SJ zo_cOswFQTJQTN{7cR@kLXvd^9umnf}mlMb{UHB8eN<*+}6Kg z%ifg4T1mmgJZbPLTNXa$*??lreHdq;bh^C@v)V?oSY@d($!uswKnpvr-}r62f2Q-9 z?5ccC?EY%j_-R=}wQHRH?8))4wE$Q7(%XdFYvAX;Cv(MWP;rEoIbq$@bOV5yX?T*7Zu#Yj{r$BpNMX8`S|IO$OHWaOV z^VY$}p1`%8Fr)a|kNss_Px#c`N_oF-13Prt%?0~_+qmHuYyj0AmJs(8W-d93!-np| z(V6vHW8C?3Yxn)T<*u5>>h}seNbEwu_pLY8cv4eZMVtc)PNjTb7g}pK_M^y?Ld5Oc zM%e0nSUD=c;QR>ZaxKj##xZsdhJ2ljE^026vbkS0#TE`r_h2$1TOCdXV2Grhq(3d>NwcaZP;%Nta@U?>!jKy2n$3!j?myJ~J zkCANtoK*+MWg9}_jcj?!JX@6ZxYcQ1YDe`$!Y73EF+U-Jb|DjD(vm%t1-eJ#k(NOz zM#Y4u$(Y8`JjCwcaqRy?;BVq@5`Z&ctxxdfzJ`Cyl+jIuTAzCp zQu90&W@GRE5C`w1=3;9prz0FxXLay&yhYoNUpyJr3(0g$Q;>KlTDs9c)`WH5c(1#t zaBHRXos~XG!bPBJHm8T-Ns(>V?3hM4b#scI6hOlD1fx+3eMxJNC0>e85^Feur1)xU z6>i=s;rkZw5P%x(ro+$YwYXQsxdkq?JtL;1`|r%VaPrBt%6=62#U;f%`A}e!;haL8 zLcBo&x~~1waR6LWVx{{(_JBS=z#mfS$YZE0O$BEbB)QOUH8Gt7$pERmH-Gcxbo+|6 zb-A})y8!@XdY(evU)B^xCO+vGRE0k4pXj|jq(kt;!I;LN5Thb*{Uhl@(4_LGpc=+E@+Amxq*pgyiXp$A#) zib^4iK1ir&`@C#P6?u)l*9+a)OqY%i_b63ekPX=)+*ivK+u0j8bAaYvTL^8c$syFL z-vH}zX9mA1-f2{=pl%(C!$lP$>=F54bKip@Q3g}*kQgg+=>!|AcN*fWg0n=k^Y zsE&`lQ=0hW4g~vWl4QQ6nrwGbVm#}g#6niuNGYX`rYsj=E2l@f?;&X|NL_v|U53Y; za-tj2b>o~YKS103p%TS97@zKm-OkS&O<5;KFLstwLz|>EosHIffuoa5&LZElvJ-pUIV|zS=)FK zr`HRz5i87@^uj;0gc+0b2;5!y7}Lo6jL~fS6Pky|Gk%97bLH+$^u4h#<*-~Gd1Ci{ zZmwfqw^LGI>VToQy>16Q$NeIicy35o20b)eZl|34oA0ApupNf`lS6sA?Ro4rhr`k` z8UXaDkVF@3NZZsV>=qo@WI?<11r~v-ZWqKxw(HKvt2LXW4Cx148NE?Y9d{jzOTG$4 z4K1vNHBp-pZhbghw-4jx`^tT|H&9R-re|N9*2p@|V^7twGrpt9!n`l6h<@_4Du0zz zW?pJ#Cvd3=kAjb#uNgp|i@C+$J^%nPVFqe)oA{aFMcL(T**_E+u?%w}g1Mg{7iu*c zKEt;YpIFbAxHgR2?ndmN9Dv5L^Y^>E!NkpyZj;Z`3=;+`h z+>gkk`&|YLOM1Jc8GtYAVJP!smO>>E!EhEz5Ul545*?rb;r#Gf<)8-W{#y?=xKT-tVnss9F3D-);(CPDB61r4FV1Pk5z0p#B(?If~$CwR%a zDxhE$0EAG;ApisM7hCWb2>>szD}?a=u-iug9W?KCsTzJ+UKWC2`P1$%w(M_M=o^v! 
zlAqD9g`~ff1g}X%@Yh=CP+)^E!wyRH7gi8}eP}NH6QS^v0w7I=5&`ty5i+$gMFOOE ze9%+95t$?aAoWTK)*FS;;J3Dv{K5iE`<+Ckogv3Po=-RLvh}~+?RP8h+&S*yywoT2 zV?wK0_)YyPI({&iz>Wv=P`_J01NRjTDIsDcbUozq8$T4UzDfUlyn2Far74hse z)~q$2ZUzBTi@^Xu3raT|LFrdex&x8gLZtDK84KL{RSf{-18|T>Qb-{SY1to^Gw$?I zfC6X{uyKNrp`ru8$p9dd`hYZKNX3C>C_P9&%71xSrmBEYN8h3LqbnV6KM>LL)*7Fuh8QydnSu0MG-;ORV5E?~gCiA5Xs~?+*?P z0Hp`g5bX8BaXH42*)IqSbpK`-86brQwO|1uQ1xPO=pK*QXE z4%H2yf&zdVQpnsEgyW4sy#eLVncfUb{*KdE5d0M$D8K;E!q-Uz>=bm+HAsfKnvPhv ze+U5|kh&fYP~*){@=8Gry%}uLKO`jtLh_)0kb$NUnVNqL$6u9%S}BD8S^8J~|2H{* zIX0mm^z;6w^3Fe41~0__Msm8-2+4C-qC1}7fxr7%nf;4khg|os)zQP+`e29qP+$8% z_zA*TzljvbKKCF%tLww+CxUPHV{-g8X0X&Y;n*vD#iegylU9 zyPZs6CAy%CE4&BIE#Q8uQ|0k&Gns3Xuzqdl%Gcv5S*A`2L<%1`M5P~a3;SN!wqV`` z1YQRauG`Qr27%9qq39IV@$R^>gwqdg_lg9K7Eseq*Bc zcUY8C@m{6FA6T1%e!^1U+ZYHSmiJNTmh(H%?$A5ffxoqjMA`kY0_F$Slckfl>>aOd zKg2^wle63oS}&XYSGz{B;IJL|TV2<9=Su*gT^r$k8zFea-Us==baBF zHSk9lZY8zt0leR{k%3J4OKcyTcmF8Xk9t65!(Ui}sSwU3K^ih7*cM;5!EUBD@!L?a zx0xTXltc76+k_e1GzESd1qrNQSl<05$Ueonlzf`&b36ZvhLjNf00zSkD=EJzgUq2n zA0cX>lfIm!z1MBL;vBT!C?+_&rhh;jeh|O&_dq7tOTUZavYdZ^yrhTz!ubPoE#+P$ z|5u|2{Q|DPVVA(8#|=`*0pteL0V4Vp(RPD#)H=YmpK}nxzdYN32h3oIrZj}Z)&{8o z=68!+qg)V|&fSma+$p{T4WOCd#eB<$bD;PGOOH+C(Pgj@9(>>g?HtHM+6soc(?|n& z-1zZRj|O2M^zyd#v$cg?(mRc3+u$U43mQ1tfpF*we`tRPP7{1O>PO+4hv_IKbs z)M7J+9$d$c`x*A!xrs-3_j?1TN);~Qggtb51gnkXJH;Rx?0_rRe>5q-k9Vk-e#b03 z&d{gEsP6B4hr~&lKOsZ}si3Z#gZH2q(*JpAoCg75_cO4(WB^5wywf<=?{?4ujvZ4s z#CJ%B%*V?-_8qu)MS&$G{;tNZ^IAOA(swQG;5`VtX6}bv6F-6b*Xt8x-1mQ|{gnRB z-0x!>l3`o=z&j+zwsezsNsjFR;i|SvY#;AI@FgFOX-ST02@QPy&fPO^jRK+V?(4pyv%L9rEP=*{n#bon|c`HZs!#i3nC3=^J=DmZpUs5 z;%y0~u@9lZLg3wlY>osFX$az?9Pgq$p@I<8z9f@^Fa@~I9e9G$d&~^r@|BK4liovG zCczj9-WGH-LJkNAPFO%3C<>+=F~rsJC$5A;9dmjt&>v+Lsp)R?!jqS=o4{dcOxf<_U^ zH4aQ;-PNudBADEQP1G|rfK?I8F-95%lTF&0+*4}vRdxfNH0T##v;-pUj@Q&E_ptDhE5>Z_wENnGel=2jhy zkB>R&Te%rJ;hu^=cn#^gvKyJx#O$i7?6Zoz&M|pj*c0h^Wdx2Dv^gK?(j@i zpoYJRG}edQ5yIIl4@UH@*Rk4+tUU5hEm`O)jt4f{ut9baZ=0D8b$IhZ5 zuT!DP!?>}fy@C;QCd|bz5%8tYfrLD=ihga|SODu(+(5jgv}@A&aicQkrV{dSlrXw} z!<&3qMsBk-VeVG*@4zm30afvL0Q&a_j8QE0j_ZOILb0kpzb=#^)K#Gk3Y4{n{G48m z9xf0Wv%lpO*Oax9`@!skFj*nY58r`Mj8O-HB|oRc{;&E(Caf2?MQRmby(Z1gGGBMe z(RSHq;YVhmu>~Y3Tj0QBP%TPX$#Q09uFIdlUbm2n(Q=oN3Xw8J#rW9aQY{roBP3sd zq@wkjA}VRjpFB0P*-V~Rkd~4g-s`o8Hh1=DEHZA;Kv)41I)OXo2?wEZ?4F z8-Q6>!k#(Mp}Z$h@r1&SfG~Wy&3|x+23Y||k;+S$@HLVH_oUdXVRXmaQHdLfinL{E z*(nk#`|TMv@GVNw(oD4_lmq*4R_M$DPb^lLMsv8(;vr!{(%dlc$Y)WwT7B8L2|ZLk zlI*-ZvGe`%BMWwf;>W<5hI|2!eBk}@|gW?D6P0gmo@Ou_Py%i^Ml6Ks65q_vio zF-%<$gN>x$Q#^t5uG98OX%xoLrBF)A6Z{SI)h@Se{NXjS15JWfkEO4$)px70*xo7S zAfXP{9k(51@@t6{R$+!qMQ2ye5b+Js`KQs+_l@%e-^$js^V$=V8dj3PPCHARSjotI z11R{Qr{tx@jjT;s$yLm2@bzyn(;ZHIqPq`crYSjV_VtfO7Y2Qs@22`lV`mCG?Byt^ z4`r%Ay&xL_|ES@st8AW#6(#OQ_Fy1Wd);kh#6qNkUy+SsRSk#hm3D;wJVB6*MuNc0 z%g=8D33~>wYRhjN8$MMD+m9|L^5#u3{h9c0GVHm1BCBN_e&?OLPJv_L*)dB`P%8lo ziHG@bp4M~; zCLS5Dc|fA>ri>I*Hi43gP~LEX=Z$}fFae`*ILYAjiXwwtg*)%JB);C~a+TW}>((!n zvnOHtP~R7!k8>)Vxjgt%+7kIDw+#EM;bt1+5{yR}V-B~lh@6@2n12vVt4ax0G1D}% zIDfD&iw|Z1+8nw5rxg}Y;AF>Ff|kWB@G!YZ$+sG zahG21Fr+9?h;9vHV#Z3qD1??IF41wfIt0JjMCxP7D4^!-zGU4vn}EF)4C%9#xG48+ z$<$dxR!YVSH9Oa|>C{jDG0nmr;X01h33T@@bb8cS1FL6>UdKE6(lM%aceWk)T~0LI zRUI=p8dWHE|C+^Kl`?nva0}(cfNoaO(|C!Jq*Z!y`uq#AwxRy|^eW{Vx43=;`i(2( zg&$<5L;!CFkY8I+*NEjv2pbAeGp1!~t1h9*HXgfZaop6s>)a}J;lR>#or>#UJmC@kB^vrL&z>o4out_igUgLDSX>>C}*$ePcS`jWAf(^ zPgCND?$(+5VIf5rhvP7RY&4o05n+7A}JaHc7W z(UcI@mr@E*k>r2isZZat40VS!R9B5T(f*_~cqw)y*o3gLz>st0$PlmdUjFSqE_aO% zyPi)S|4mAFPfEX8cJXf`%D&oRHh7@j=^Z<$2a$>nM~lFpUe&p=gboXJLAxBe7w`BkdHx)R-|#x^6a$ zQSKB&VaGN$z$)pS>|*@;SiA@F8WojiwApF@rL_m!R#3X5g}Oq^#;?t~ 
z)C$w67Xny}>gubaP`a!`7h9WNJS=GC>CLZ@03uCco%~{t5j(hBrBD3~iS#MYTbV;k zA}DCyV zm$6I~Fl~FkTo{!&1^Wxbh6jfv)#irg#X?{9Sv+uxbcQ=-W*xpZl{klAVDlwL zEKPg|Yzuwey0-^@g~naYn{)<7@8~vi$xl@>SE{F;y5NbycHfsv zeQnvxo4Ba<3T8%@KA4YXQ=Sx^VFJ$fk-DT6@RAa?y8e_(}1vc#P%K&9a*V@8u? zU1a%IBDtdCnJ$j;tO(||iLO<7`k)t+PP}p-2i>GFx|tN(YAIFv)WJ)q%n_QrC<)Oz zEsw|X3fb71Bx!+;70bRdZ-9qNTlx0S-#EeH@sVZ+p}ip_r+E6sbDCanj-_6HJ9cx~ zUhyj-Huq7B-P$89d<7}R)V%22fay51h%8EDJs4Tr+(_erMMun3#DLXAb0g{pmJ~Ri zHGTP6l%U^r9V&Y;;ZLYAMerEmsX$`5j`q3ejn>%6S{M#my8Q~(BHy?-f5NAQ_g>z_VDrB}Uv@J9H)>tT z$E&szGcC0YRvH0|LtpRgW;p7eMt3r)J!;#O>Pd?dA@F8G0VDB&T^Xw13*+W!f=7b} zla_THN$5n9Mw!RCLNFSE9$!}ktcN>BbbPm$jn3#FzReA(Z&=&&)Y~E3UJfJfKlQgH zSXDPWFFSV&m73t{o%~t2AGdW)_dy{%WYh~-M^`piEpIgydMD18-!XsAZ zYWwnwyOgoOsFGo-ob4??N2EUJ-B53?X7EjzniHj!8+HxqskmU`{5{03$SB6Ximb9! zIL8R&agGl~L#M_k;1RyeA)%yt!lq$S^d4JBEZc2( zvDUXg)Z2C#Pd#V7JI#dm7Cq@{*`z!0 zdeURW9gJtyb^f`upS7J}5y(##SDT&Z@-&}%_!YkN$h(}f>oU$=)6wqrq9z2e&fzrT zLx~Nv9m$LU7BBS049N}s% zl2~e5yl^iyE+YH#E~X}63Rmkt$9e!?`Q`*sIi4ZyzWBbYnNp+>{BuUOqJ1g(ub)~O z%G>$J%f`@84SF7mY`mucjJclt=B&oo2VkUSQ+YWGncTzCW z(1f9k{-9w+f_2T(%gD8hTD5T;G!!bix;p(W-?z|u)5FgCR zRUyJb5EJ^S;P{V(G*igDKCB{#q~Cee!%kb$Jk%iH^wjk?+;o3Kyy)Ppwv%zPc+_rp zq~)3NRS;7UX_B`WHs-XMVt`!nvmwefK4;{MG%{C#9%s*J62@ooCq%3th~91|tL@s! z4iTNiBU?vfEGfjV)~MibiNMMQ`jx2oFD#VYL%Zqy^rdF8Ujkxy|*VrFLpWDXFl3bDG+wuF|;tA=XjjcHI@Z+1v+lU8jXRr^QIO zW}ZqkZnlaY>tNoi+l_D^^LzO@E*;Z+Nw8w{d5_RX=Ff({3GLJ}gE#M(Qh%zP_YP%R zgoDy*%XgrmkyWc1V|QYiJ&ivJETvJ+=)jzBwxoYm8sjfJ#AapWH3Sa50-N5B^$*c)l}2 zVMsn=>pQ?Wb0Ime;uf;K?wDtBn7MUC~AdyeU zV*k*3rX5J?H=)tV>t(9-;OIttEzFBI^TK=BDYpq~l(&)%(%M3soY;KCp&4Oj{_Mhj zrYDmhg)nTe=SmSG`=;bp_+BMyp1&1YF@!%R<9#=bN_%oUJb-VtLKiJ)v5^Glof|)W z&{sEg>|V@Ue7&eVv2BYr#R2c^eu#x&^v!^?sOC4{2&M)$lJ&MfTVmGxDm!v`{?dXcIjm$UrznMpqZXMFNY!u|bV zGS&xrvr8}12w+N;-S6wZL~45{+yz@-Q?c`{(_3+$qdLt^#x-w2<@V^VDX|YEU?NUr ztfo1lW$)OWYsF=B^w)l4a@jvyx^(FaEu?x(K^#f}Dbwke7ljY^59)l=jhM^5=Ehc9 z)}#-@E`xbIe0rW=MKt*E(x(4+Tnj+(L*r zQsTee?lTAtKaLlj>$VPF!+*c1tT7tIro6U(VcEpj%icaqBQc9Q?XgKlxcTqGS zeZV99$M*I+;6wqX zqaCuFFMieoBkjRrx<1{>{HH97s;90}zp$HAXj%ln_S z)`IrKr2CitZ#7|qoF{lXU%F2?LpJJEd_$0}U$=c2#CjY!O?cmn!izI{cbn(ypiv`H zjwilMY+J?7l4Ru9CrM{!EV`&eO(9g$3zN=!nO(A}W-|J&3$B9aqPAMkCDl3O&sFRY zTY#iwX9Dr>K+eF1wS&$iCI{69OIV?@W6Mc=LZ;}>us4GDM57fbh$A)UB5yJS)4Y4v=`!_h(;k^B`rICP<~o0nez9Q2 zYVPP_Ot4!K?uSpH7y5`r9FGF0UGlx6SudvPBoa|Uaj?BF23;)jhB7LG$EupfWM`U=(Aa05cPZL)tXLl>g)n(W z$tqaf?x<~!d zy^%^qOLvsDj#)1gq>=0QHe2Z2BR8Js>~R!%JQH6C%x(n{ycL1;tE4@>*0wK&rf>fo znusR)oQ5s4#O^i=4!ATd^%?qbr|p7?nZSx5f;j?A zGT;>`X|QT!mOL(%)ax~fY>9tI>B}A@4R3)Rb7S=1BG$Bmg15;Jk(+>O^;deZOBz;p zZPL8#f5(&m1C-8ZL!kju0Gs4I3VfwhLI+Ujv`Zts!^Ey{?;TkRk4w%f2)8DPMWuhdZ@56|xGUxxUeFagPXJ9SY`P6e`Ypm}7GE=Oed(?0KUI&MJ zj7-%yOqRItgD7FPIIiV;a6D|^0WE7nwa%b{ucOfHVtV8%YX9~x<<~I-Dh=q|8*u(F zhkRV=q43j7F!KJ*UoRPnXyiUU=?!%=(wVFqiuVg-zB$P2XoYZ;`GyAi=KKY%r9Ix! 
z#rrYNu#>>A=aJGI`sw9cM32)SkS-FAw&ew$;9aJI(?BcxxXO4Tl{1TEHs}pYD`>ND@`kjCvE)!{UUY`B*NUo}p`4%s7tw zML{{WBFSZF;Quw3#rAFUajFB=TBn~L~2 zYBnB%D_wARmu)0DhB=TU-AdU1Udcgtd1{8FwUCJ6Mf$hCF)UdhWZltdvDD z8;vm`t2H!r!FW;i%mUI=#M@$(cO(^rv2E*K?BiDRoM&{G9~GXodEYC?eAvM5mpN^& z+E5M@bk}Shq|(vlp<&R9yO2JX$wZpQ<0ict@1Acb_JCt z+o(&L=KYgI9c3dal{_KF`2yybier~5Z!g)O*<&oXi|q=^O|0rej1zdQrY#sIrmUAV zANNgx%J-j4)r@MESIu;ky-TVBl^M31>LiOPV@;Q^)w-Up(e& zM%Bx|EZG$lnry!_wh*wUr<*vn(^P|>R#Ww^mNk|ja2!?f{^ke%5q_L}ZY;0TV#o{v zpJ?@M`{f2EG$*Zg+}EUYOSYYRTgQ=XYYTP^cK2a|NFTB(yNOk1>5r@)Dvicc!%+)q znGdd!RHYuc@I|8}u}>+x485(q5rHX52#v4cm6O!ml>S;8nm1UhyFshmrGK04c#Lb4 z7{0dMb6CD}D@LAG-fTg;kfTRSdD84jWnu`6;#U7|hE4Nnjro==fqPRu&cRlOb)yRt zTBT}?6Y(c)+mS#bY`KQjEf((nJ(wZ^is{4i3MLd>PQ!@{rj89e9)f|;pSvVP^ z=xQd7yxO*YbAA+UB^yO_6upgcK2pA2P|IMSYW)c1sr$MO>PgS;#Xga|xmo?=UiD)y z9wF@MH+}ZR!u4p8*UQ5<>05m_zmSkwI5bW2<_l=e6>w5CTR%$Om!#zcX6aO8>F(Pn zQfwmYrysnxBq#|PIdIu7b5`$N`_fgM(7zp_%aJm5r_n~@vY1S7w2{0js&zTCth9?0 z2CE8>$9gxkADnKkp1sX*KRWQx`Pg{!t?8cWoV(jxY=uo@qfO5kr2H^u19IYuWz(wre<>V{yJXlQY5UF6iZ zyJ!y9dgGn~qxy$e9g&zSMN&iOZvKU<$EOIL-5QOyHYMLsw39AAu4tZ&p6fS35_MN9Tu6PooqL&Bw^L>4vK8SGbCca~rJYZE?q_bg-YBfm)Z5HKopiFcKC zOBGa_&pgc49jvK;f0EO0P(Di7l9icXGDbii)8RN$swmp`m&q zjl^eAT4<5uj>mwum|e5wZvAN~@U)QAJv#JNk&J7-tiek0t9AK&g*a)oS3JcO3p(;9 z;eQj~oC-tv%W2{zN)t|m*~2{I&gnHZCt*#0@MB6&>jFZwSalb3su8DU~tOVKzu2r*bb{ls~v`4x_Eusw8jwNViA+J0cA7FdLG_tpoe@Z%P*BaG1 zS(KdQYOKjQUvnM<|1Yz~&nJsH)?krxJTv!DmPegq48!J;>*fpb!<`~Z0_BD=oUn47 z7luYj!7F=A~0lOX%Z>sk_%30a}`U2Y<*p@52TGpstRe@ zaj|SfBKH*Ez9pz((BZ_J779rz8Ek~>E^eQv(0g@#jsuQqwJ(TR!^4?7p1bdsg$Hj7wj?kUOM6hVdOx6d)Uz9YwgQ88Yyr4`?_ zWZoIa>C>S?`Ru6Jx%yB@VWr-ihi0#f>h9fw8Z}$*V}atp7^TgA=s4~D0A}nlEx(sp zmD=Jie2T2pi)t8++g#6|X-Q!#J>!M0t;NhT;aJvfzM@L@G`rDOOPlI^NI6F$W)XKCi59P@SF&QYh-F-T@Xj{1ME210 z3hDxH&=L$rL>v^yOba2{%O+#8T$D5wJ*f6XmS|Qo!G@B`?)b_RYR9AEM|34uX;hs}@n<2pOTlR^FPWV-(N7PD z#wj~@ww^uY%ZT*J5%nZ~@9{^ePC*C+P5KRDle(H@+rm*ZdocLFT}P6;zlTF$y@U?+c^Z<<}DgE z1NtKz&#~f9b8Y7Y;Y1|sl8if|?ca68n_1_>K_6%!w>87?9YrnAbP>f4MrKB0-(@4` z6-Jb&7vsYlwQj?Is`d9#a|WJx&E~z2SdczCv4>D()gK%wD=eqA(=+vtsfC*HwA?{k!yu z0Um509EgdsKdv`^?x|rIC5WJ9)qtZb=_P&N*V??#bT|CogpH<7|5Ucr z5b&ze`ldgISgNzSELj*<^Dena zV3T)Bq0J~E;z7n-wg497)!+iwEGx~cV&P6SF0Ws-I0undx%S%?Os+eLFWKKklZ40! ze@zhU(_klR9ZAlDNx8OJs6$tL&7_fw-5avt7>Duj@&fP(t$Q(d5tR)tnW^y6J`Z11go_D^mDT<&Gd%Y z*#F?}E#u<;okd}2p;&P(MT)b)B1KCJ6ewQY-EDD~V#QkA-CY(busDU{6nA%Lad-M` z<#+ympL6f4bMKpcATyg}CX-}lGLx@%tHE_^bNf2b+T~UZ&4IdxSt@`3tiCdJgVzfW z1pzx(F}x1?B_2FLBPr<{ZB3cxHYI()aH_UtCsM+K3-*!l;11~`AuD_0Ro$XyOEnnb zm3SvJz0SUSh3b2S@^B`qrI7*@CcFUI9xoozVf2IN7|NJbgXl3axk% zTr88zK6YVEtviIlX=he1ziksHSB>L+Qg2?(N)9!DyWmsoRG``T)Vzqy#<2F=U&f!& zDaS=}Es>kS>(U%FylkZ>sb@j&!G~Jz;WltU^Wkjzx|Pvtn`Hz)S8jzgC#}4~TeBib z?r3S6ZtU;sq_Ij8O{r%(jv*nzjWxls)=jCi0Op2p$#FhN9zgv&?MEuXk`L?JVJZ&@ z4XX|`AhOx9w%TMdfI-PwJ*5M49umRA1KUnZ&mYS(FJ$Y}^1sOPV|y~2 z=t@BMfB#Oow{{pu9zgUQ z$E>c>$2Z4z_{~44{mbrnVh`2+^ydrd33|fK7vpQ5fpPlH{AG6c-%Aoe1&5UltowHN z3rH18nvj@Y-*WxtFY9M*-U}u6H--WSs>Gzg*9TLi7DXSx5#775!4aa9e-%If4X?VJ z1Bd0v4>?yrd8wMbtqjSx17R-5x3a3VkJIPJDQCNHv&BH2M!RZzxcj-&RkF-xWW{D` zPcup*Pws0Kot4w4Xn8&BANWOo8=D({uK=8s+*^2kSr$tpyJx%dOLVQq(++U9*xf?- zc!#GtNau{QB=bIs52b;vo?(=I5^(RA0>*o29heCA*4u=33Vd;JGRT;I)leb3N3x(+ zaDic6qIiqMt@0DGNWzI*XS8#3AOF9B{CBMyCmiqHqn`4lNBmM03VLTzNJM39Rv4P! 
ze+-J+4X*PH|6?`7!l374qdoDBTI1&-yfwiJ@MFK%PTb!eQtb<#@rwSdZ6cDjH&i*7 z4Ds3305J*fhIo=YjSd)xWF<^DkzgHmTT{rVD1+h zWbt~y`dDmGpC@6H%hm z2mc01BoqgdYS(Ww@>>-@QZ7Z$EdrrD&K-xv3F@zEwY83|^rR58-iX1SIgW$fCN}kX zs7iKnIhhTUeB;X+!t~(nB3ot|tP*EeR%fgzgn2;-)E%Zshh*|6itC?8;>Rzmm3C{Z!#)<1}zq@dD6V35gA;UB3ltwj#;S0ByKirupB{TUl^ZP-lp+ z8_Bp_x$;+Yo_qjZR&Y#2Ay%BL!JIvkSRd^b;Lbeb$C5fJf!-g4UW0m4y0Uz8)(T!W zQM-*Cj*d{dtR}^7*-Q_S*eRBL&Tg0n8fZk|-^bVWMA>L^{_3-5UVN zLO3|FK1gp*s+cY|_4G@HkusI8w!;8c)F}b8Akxs@8RI=rTN&!W#T;Ey0p!}Edxnm+ zcC~KQy6JpOm#(J#1Fp2%k;Vf7_JieFEXAYsT=?x<>AIlNT!dQE(OC|YQBaq)hq@su zdm>tYIbqwcWpr`dd8wQ6aVJV0AQhgAjiB@D2*wu@dgHK$>92RN=x+O&UFyg#p133Mej+K{1GVViE)@0S{bH3 zcj?(qj58>g0`I2PP@vMHRWvugG$~c|D6P@1T{lu2z+g z8n%l7gfX^7D(lXtoM)dJay4)^p2s%LW@k^2rq_YD$9BdVvS;k5=51}`O1!#%jnlL( z(}Dg6Tt5HY&vPV$(d_h@Zf@~4+xbgAyQ_c#ik{(ACqjKR$i+kqaK?_UOxp|l*&iXY zdS64JgQC1qrkkCNLtAP5itRmj1{@t{`(xtr-jvCKow`dU z1bRmJMD8A&_*(qc{G>Qe+%X&#pjw9xs(B!B__VNjJ)hEUDI}GqklG#QVw-hfO=a2S z1((D|Fgi|o{zeDGuDMfnO(`lN z@h8O3<_a1uU1Q!WJ9d?M)b#r3l%-3YEw^5LD(aje*t>FERTw8TV+xcn4!?b-MJd2M)vd2+crXaLtQSr=881mGBy zn;peOmijGjwYvFlg9~d=!@7hH9^~JP81PzKec>OnaBF=SIV#i|6xfOQy&v{zR4w?% z2V>#;%OqcaXV+Wu6sz6fy_Jr~wvR`WMJ(d%vGe;ZF5mu`&tpC5i%s*V-d73tLa(oY zC}<(r-O-WajcjLse)t<9QQa;+-mwVout=up^I!ww2A~P`cf-HoS^Nd75NJTBun12H z!DLx(jP5I1FKw7_=~w6KqX`U@q#AFjQ`(@lHihj=FXhO3wI2tp;;xDu_1eAaGTLM>r(yYYe06<`_kKiB+49=ZdlsRXvg2ijGMLpo> zd5*CVdzYWANDnW1kdCG??2JPU%w$A9n;Mizd2+jx5&Lr94MKpuMvsx6j;3H3xrT%l znG@jD)olNMntMrhz->lUjKaH5B7@&Uxueq%rLh}2w(^=uTCgP*jn?v3l5w1RIIz(g?vIxj*v#odSjS{mt`Pta-E$4xEKe+Fd z#B&1&ed>jx6~`#6=37bkPk~E66^;uF{Yizjm&8t$r>KJ|rwc0nZ6R#3m2MJ*g?z(tlS5%rgK{T%xCBdg$G-u$>jM_M`Va@GPK+l4hG&Iin?`Ukmr061zm{wa{dPu!|>-o zS$)VX@-WC5K|D2j{1Ao>_{3Wfaco^EK=eeDa5OeHWc)3uNlFs-gkH@ph)+;Sde$75 zlBWql+%i40*zSV(W=bx6%!%Wc1#QlbVUK%M07~dB{BZ_*=BE26fTC+&28RHTx3xKe zixS_r9^G$T`K>R@lVedOrJSeDifDy^`&PF}h+xoETcIROcN{z?4!?XDki1B^|&LXQKoG zxd2oIr(l>_p0~Scc<0r4aHg;!Vy?#MiJ~gc`E<#0Xy{T=J}SgZV}kvuZchso%2zzQ z@!iUrg)AFykVu+&`Eyz%4Iw2{tkkgRz_0dK%8m%31^g!~{n z&q(jn@^HPY7DaEyF_|9yj8Z4SKDS&HIbmow)nkpj2~!QT=!t{U-14VBDceoGD3WP! 
zU+9S315@U|N3%7u7Mf?_l_+nF?e3Y8*`l1 zB3Bg8%CZs`47!MynrY8s8u!9AS&8Lko+;1TiYzN#yE-cliD_R$Zb39j@I6e6y)roP z3_C3~ncSfU#)%iP_8%BXK3Sd-9lTXn8_pl=E%b1c(k!3%kFwW*o+X1`KTSY0d;D~< zT%r^vgTVtUP$& z62tE54!3R9E9R2->R;lJ+$hBQjW|hLHS#|#pejHpJ~R~t1(*n}TIqj@!+(JzlX`Br z!f|(1dld%myqNfe`g*T!t*5=dEfV9SoL*(4s3F7PnB-UBnT|PnHfK}(3=zFu>atlb zB&nt;)g?3#4n@{VV~-szL^9kbfTpj?q`5Tkc#LWDDA@KWV6AX8t1E(76p)!R%e^q8 z!h~EVBvn>Jkf+Ge zlk$qlxIR92qM6lC3?9JC@Se+1W0*}C0jQN89J6($o9kWrUff_zD~Rp51Wzl*nE+kP zdccdVBMY&b;e?M<|6OR^Rp-m-FUNHkipNp3i}bp5cqpVU%0;B{Oi2>v@Kjv-vka4` zl&_zcM4*Ml5**4un|}kofii*}*6)0BdQhd+y{toS3`$f|oPs{vQAn_)c9odfsU)E) z0%%?;J6jPdww$Ap-+Eu?awSc6q}sL#KUy!yE_K}(S{=>Fq!65|a-1j&b=-YL7!}}; zzL;F2*A?-Uw@)8UsIx=Smc0Ete5S3 zh8eUVEr!jfNRr%66RCkbenekbB@yqxa$-kLa38ZQX`NR0m ziTgu{cca{^!!y1C`Ms$o3+ERy$C%05gnZ76Exg*5-v$dk4hbU}e zTRtAi91D97NjG{;j?;#L-L$1Ft-mo;H5scDi|g-REF_r-^KA(>WTwbGb*wQU#Ld-m zC>RFyhf$zK(TbXq^700#M`;NwVx2H|@s@UF8xtWbdC_abr6T${Cs*b4TRRpQVQ7fj zWoNRUDq7)}5+WArr8fYQa7Lnc?W77F(Vsve(`#HGGB+uBgdxhl0i~EyF1+*Hy4$jk z=8{l93{?(t7Su}+3+Az#^`v14hc0I^u7e_^1xPnEv2~D-ab^$>3yh`Br*J!@KHnBg zTusE_Z@RXJQ#~ebY;tc`rb z%05_>eQ-wH9QmsY6}K^ibXmL23C-Cf* zpZ`^@a@e!0{T`_tevYl(vvCLLA~>_P`>zN*gUp#>${s@{o%C&jMrl_ zwJ%4VBKJI`kas19V-*gTXKw%eQO*uGU|iSBB&V4Sc!;;nA?x z>bSYu)aRaKWPRHmc$rE^fWybhuqghOZN&SOShI5TRceuGrjjPg#JsKw9F!QuK|k_o zq=sd8=MHsrYn0DD;vC&QXMd>M>P)Xh^dH)cgq+b8{LTf7MmH@x=W1D{uBV|M)@-| zIw7mR&scjrH_sd(*Mw~cCY98m!6Lk4_m5B-4%}5Kid>rDiOAhAATqYF>U@Du742R} zos0|hylJ9HW?{HXG9HQsgqq{H7P8yHvV1`2Ot3h%jR%V`2Y%@Xb z1I(XfR)F6xqrue+o9M{4ly9hul@8`s#R>W%3SE`wqF>OGr0skT4th6Dnf@cjw zHTZ>!{a>gRzfh6?k5F@dx0=`GJ1ZGKXHIGoy%C(J7r^EzA)SKXWLTPub};gu-L%J? znQInRHv4}uI%Xz3>LO__xE+o@XT!@?KdDCP{~wC}^!?wV=oc!&--!RkC*PlZ`WNE= z1FBpQo;Tw2rjn!)gL{W8{_te26-u-NA?;mpXdyx0e%2G+(9uCt_Xv5g;&y6?S*0M{ zJh(vtG;KkOjib)buEQ&voj|}>@T>@h2V)`0FmqUIdol;r2|U#!PwwG#WbmwO&GrfD zl)mGU1cb6{qny8ROri8U#v(CM#nSuE1fWYEjp~kkVGXWCU2_ThIESKD7Gyb#f~wF9 zT_C;ZURe5GWC@?=0Eb&WsI%Lk*v^+kb|oMOb+m>H|`G zhLz&TfmWgN#^gqE2VYO3TUe2CC}d!d)1=Lr$PH01v+N9`?3oHgl!ZtQ1eKPop%HLJ zg69){0CA=;%h-m*lz@Oi&%m*Vql}GCun!QSwIF8<89ij>o`|h+1tfQ@!a9pz0LWvQu>Rmd0C zHq^Iby3!grGy_K5F(lGmN7e$z(%(}s`a&Fyfi97PEv$uHsWUhVbCE|lHpgY0<_;*o zTw6Ucvl(qn-L9bp%T3mZYSPJG!fYwbJYb(9rWSq<2_O~xIvDx!gwSo(u%u!p{5u~O z+7*S;x&fW%HzF=dDUw>Loi23BoCH0w_Z^`K=NVUpHSfhQNk}-Iz(@5>C$doR+SNp+ zm5H&T;Zv?&v5@=>>uE@=KhNMJL1>+F9-=`n=rsysD`m-5-*<8SR$oax%$jUY%eQtN z+{AY*BCDkzxFna})wOpM>hMwJSBBM}6Y4@LPi}T-@WgU7P&NXTqpBXg?Qf?S!c}n;6 zJPq%x-hRqHHcvN;GJD#dWN@c5{3gs)B&;hU-;$r4ONhO(Mv$zt_7>Z)X3?%D!`97FeW+mJhEoHN-$$zt4;h3RmsDPhG;c< zYq3#m1ykHOc7^9lltSq~C9i;8c98?g?EypfG&95M&5>x~nEa|(Xhx1DX2i;g+wM|H zdBknZlfM}3%vPtc~{bePIMF(M&^l@c9^BzGt2 zS+Xv6>>AQJ6LzNcb#-IO{k9mttX}p>heg96LBuHs*npAH=a2LEwu|7AdyITZy*>N# zPvEj^F_@8C*T^_V)}D_KI<-P~bH4i(pk1|wbE{fAxkc}5zcnakwwa?aYDl}e-uZY6O@#)}p-f-^4v zBPxwZGu%p&{znbzzqn*JW9#g>8I~A(FV!cwKcIFU<@J@vY)YDA*ZKtjwMp@?rd`s` zl(on{HTKv}r*)j3FtybTpRtIOpv}!~k>@cuKj&>2k?ZH7Nylsax?i?Q85@J_I2}AO z^dT-rsJVWN;DdvTWI)v+XLROX)X4?kte3e!f-G!2MB+M)I{# z#%iUv6(^|G;p&_F??JnsSTj+e$`2zNi{F4tipZ6n;;5iymARhElS&cA4ZTt4s_F{- z#k&k*6WaeC#X!-X)mLy}O1`B^O3S;U{27^o?nMx=>o%uL`{|O;Y-*brc)8k4+9LYq zYyAVqR4>Zo-RF{x?20O9oF`wR=AY)3Hn*7@vAcXCuZSK%wLs%!{qVjnT^a!oJg;RTSMF6_|TN-AUvd7vneXs(yRqA2ogwx2_JMW&iH>Yt~Hv%vw@WiZk)Gns|Pm z*azr>8%Win+t#Vn7QUVEYEbA!HvFq!)0b+0vK5FP?AD)#yF2 z2|y-w{f0>eYAWxn{2un7dw5P;PwKjypkB5dtGmmt3oQMB3+{{l0Y}Mq(-$M~x3aQ@ z>fxsE@Zs(454fu=8*n~a%C_d-S(mqy@l^`lr~NFOnFU6q4U&1w{dNyZ*%abpif;x`$D<2f5q9*eUVoJ>zpBnMdc0xpFW@IE-1*)g-oc&vb{H9%)7W)b7m+J*R&BOP?ZZUTy7@>DasufPL-I)IRT_ zqR6?&suR+#)>$C2jaMI^)1WJ5c#Q}sAUuyIRXxbU;kk`{X6B9lx{!wIEtxLC%x%zu 
zC_B=Xfxi;($qB&3Fhi0(ICI(ri&>0}Y;$zF>#Agh{QC14 ztqYSf`Vnq03M_S8OwaqNoK?l;lS5{Om|Rq*m)?PC;|5{^_R({NH}%W~mkM+mrQY}$ zH*HxO+n(S0!s6$*q5MMQn>c*B03)~744d{IO_I}#2o~-%q4S8Cs(OVVaB3sMO(2aF z?WOl6i?iM0vcQmgjkPex6*`DqWb4C=Ob96|xn7ggh+9@($6*dI%wjhTx5wDDp)7VY z8jFt@^zGF4d}1AV@ZlCIw!&k6>fq|i{<^BU=wwnR!86Xnfzvsd7%8T*zTvBbE`8^| z2>GSOtp;yAH}RUP{j6FR(C>1acVrlH2=3OMMjhrTl#l-Q@S==oX#rJLuf#IWUcGU$ z@G~rYxp&Hq;e?=oH41tyMK$>J){E@-YH@3V5|((RrS&G<~bOj z@sxBTC<=zs@%lPIl|W@-;JjnlvD9=NTh6=;*e0Y?8!dUoO@d!jE^;<*sufApu(9Oqo@CmBi}cK?J6F++na3ZULGC$J z$+*k0rjq4M(fNG=)u%B(r>TXp){eB*RFmV6o{Q;dgaZR!hY_DxQeH#T5qz`ZM$IK6 zu&GdZtclkyH#LzdGnnEQhwHSn6RrH7W#&da&rh?2tVX&sBEEW!y5dEUTs0c^#@2@) zC}T`Wv)LW&IF9wy-}ta$7M#PmR;2>E7R>YV+weR^TaO-^Sn+Hs}%4g z)CBXWlgRG;!){QnnVsRBmih-5My$h4e6=L<#Dfy0(iSI!O{B& zIjt7F8Jo0#TImZT<$MGvjN?SP@!_RlC{LAIJv0;AoQ-EEhk_x!ro0DV5{X#q;Teod_tZI%0IoXk8 zW%gLoz*_8H34H8Oi=s9``W=otr>t&fD>b;ChUlLmZ80-a`s--pzB~vnJuP^p^T@_i zuaIeD12X4xqvQ;cRO~+U5WNZmCdj)13o(Y^L9Ie@;|%CEg_Z_>@ao@TQ4xb%8)9E| zU(MeYUV^t?XT*`6du%zJE#Bama)0wK0gk0N6WzyMb3Q^0ch)CYlc@_8j&)JqNViX8ocRcq9X z!J*MfCDfrS>r2h0WnSeXRRbgaoXAbPIoyiQ}-H}C3pM-;9q#7U)|x**?Ro(SrNgW8}?*C zsK$}!C*L8$@3Q(u?~*VsMlH{aUaVoKXXw9YK}ZkT8GT2z?lc}rEhe=y`bzl++?asN zIhcEQ0y-}CD5PR4OLam1IdI?`AK`dJxy$4mf`7Axy~6qdr`cLHh=!v)iijq$)Z~vX z`?7D3aE1^&?+uBD$kb|FgAeum6L%$nJu^9TwZ7BQ$lR3JXv}djY)syDgKYpLck0=GJ-G zFD6npzrzv@RJc>sFYW-UvLG$P^Dy2L^m7DkUY^&2D&48N_bsRf!cl2bg>Z#VQ-h4c zMw4s0Jf-L{nqeNR5H4wJ&eP!G8P2+Y1n~hcsf%up2%H>LNuwxS>err^wfK4mJz_wT zUP2AQRNgN4RW@{pyNQDzuMZF24AAwHBUV%ie*_7E>hgL|aq?}v z+~Y+bO#1A~!udqCG9tsW@?VVbI+r7UM~}**(JUh21r`=tL;`jt{r) z)J;8)7E%cPhS>n9+#Mmue4J>~TymL;341FHZz(xX!kcK<)eE?;E#2#e9ym8Jlb`j9 zq7#Uzqo6p-Kri-OTqTqtc5gCW{;MGOCN%dk<=d7+Mm|Zc*m8niC9L!;VWK-RyYD>W zgDChWyem3P8F+N81$q2U)Bu-|yXT0=tcW`#)hd9m?+!{7-Gdg?^3QP_Dx{%wP6jFC zGSCboTEhVnCIo8JI#W*Kr)t@oDUZj-RA36bZ%HM>1{z zpx1M29zTDZ9X_W>rh%x-Fe5Z}BhNMUNBuM8u9I)8xWpDYe*L)A{BAuN5~QSRogm zu{qFOvQ(}L77<~N<_J*Kdy{X30ghAP|GK1hq|WD>nU@UIpj)#=M_qoN*hFj~Sjzg3xnkAq8lE77zV*B@6i}B;nqc z%A1&Hl*2(Z&B9=ro0>PPKj8MSl%uwuc(!1oJ5#%u@Z5HQ(GHXg{kiE7QBdF?vo5!; z)qozovO05?N2Fb4=2%KfKW`~jo8R!t(=Wo*rxRD^z@@O1l$A#<^4R$Sr!`T2KQ{q> z)zLH-MY2N%)G(tD!lx7ePYc2)Va)b6j*KdtNhcb2R9XWQiL9O%S7EZQ$DHXZGmMke zri>A4oA#_2>9xh&QIR>NRDJXLB`V?}&<$GXpzgM@_#a*!rUF_YQ zSK|9%V+r;tJ6P5=NJi{43o(c~K-3c>Jd7_6^ompk6kmc{g-*!o`a9CP@X06i<3RIRend(%P^lC9g4_GwRBe z#(c{#xe40G=b5AQb zI^}k!z3WASmUJ&Mx|qTQykHJ21?72MwKA_Z!;REoV}3r`>N`F{SEgUW8SvA{X(CWs z@6w3enkbikWso)YiC72~(@7!-haJK2Xc0<^Hyaa~P8f2WF|H^BD1gO`?{JS*QoS~9 zhAP2ps^40JNj{pfdl#%IzM7AiQ0*#llP)FEUW4soU^PE8e!(V_zXX-S(qU+p=b7iE zs{d>mXI!kCC0B8jS@R7mdMlq$_ID-YxZKCdduK4_1-+WC6XeEnN9URoOp|c1UD(cU(5|-x<9BX2U{)JymZaRlmb18L-2B^;FU+(?TxqL7@wLIMXrmG z=BehX%roalP#}S1Bbc^Qr!>6zB^3W``SSkF^2J({Ab&rs%>2!gJS%dQk647c2NKoe zmKPxdV%=AQS=JJJayeaNef~##gd<-2Obr57c3r#@WW8&4=ewb75d3gcv$XRti=Ty! 
z_Um1u#Riv2wy%zdB^DYxOKO`nK}%{E&Ou9BApT)hl8EvJAq7ktMn2z)2(tI8L;JZE z=H)TS5o!xU1Jwz`qpziD+g$H0$8JT&2g2Y}!dcL@Er!;u{+E^|qJT}Su`wC%!H!Ju zCteN+K1V6B;3yAGe*|_Dx3Y`e4#!Y0GMWxbyI5}MJj>h{HQStSbz)l($<-K+l;lV* z=^Fx7Zq+g~ACV5lNfJW#JesOWKgloa5#XdlR4%8IjJnZVbU=y(Ng>01{tvhSLlTn0 z@`yQ6MZ?&*@DVM0{uU>@d@kRLy?xx~iUBKF@iRNwP;*M=ri;7m4_k;QM_%ue48n?^ zJnLH9ogy*KKHbYa<|T4{m?d%Y_$&jXnL?xY;lTf*ILdIA@#Z^v&II8o(-W4jTd58s znTptk-7;v>{?}-ViQ?xTdB4w#U(4`K)9;lt_;cl~L}Elqh7s-Pw>YX|1zeka9Z}~( z+R$83XCAr(e*ja@EX9jx9w&yMQslP@iqL+q`l5KBM9fH|z7!{7)F*IKdQg&vbW-{O zLeW7?RecbQ4oO26zi|8TA^o9sgUKgXeg6^itFw!LvG5_1W2XG_1%$17goxQbX^3=a zRel@KbV=XxvKbtpkxFq{dhnM))0GkxBpp`=<_WOnAjyrn&o6D&wH9>hPY~mk^B@_< zL~?_P10S+09Wi@r8!C-6u2n8oB&%8&f)k9ty)zw8+(CG;pi8$Z8r5Yt5TO0h#nUL| z^+``jg6V_u*NGTCFId?@fq4LH!ryF77#J~2566KC1UaCPmB&^LSG}slu_j#fO#c_Z zL3i(6{?=3X*l?i_{NH~$Z2VLlr`(tr=Tg?BJu*CX)u4(q>B(QRUbs$6?ZfVGyY$P1 zC1mwZaIlnmM>(R@`Be%$jjlD)&SN$}E|aoxMN9u1Rd2(0dHUI$E@3XTiCIQA>F(l; zYmV7enu0G+iU=n%OxO{ghvN*!*=i72utz>RrMFjpf0y)Djk(1?J(C$0uPiPp^37~H z=ESPHPN}aB?tZQu%-d^%zIjKJ#<2X$@3LWX`ieZiJisI2;rdpfd+DW-4fJKw3RAdA*XaejCO;3bWSeyHKvN6a>df$&{+Yk~nVD zC3Obtq2@}Zg$z%lqDKCk?~2Zz=h69lx4SgbGnfAf?E&R)j+*Y?Spq4=_yAxh9fEeU zPn~D<;#SKSS9J9;?e#C^);3}6C!VM0WqEu_J75f4j}n>-m~${Cf;0nc4%j0%jxw+5 z*rX!=+RLZ9eN<{~QZ^#|_Qd(;I`#mUe$vo1J){mhH=QZ{*0l_jY;Tm!*^AGB`Himr zp5j~t=VXiZQ{4_Xu_{hZ6G0ncl#4BJ*)8>_5zZdUp>%>VNr| zx{nw&o)+P_^9a5d?QuSY(@&0ZD*s!{`S96OT>y_T# zhJ`Pfu0SP~nB@>u2_q`R8UVnWa7nm5vVZJ7EkpJL%S;DmG8Hn%Y8sX(9XVjw2ZV$ zP>^AGQG5JP?Sl8$)a@D%+9aMs`>LKkbs1e*Sr~DS=m17=Nt}C^dZcWk-YXuF{R3mt zu0@{J68QF|0~&L(n;k@^8yDn&1Y0!5$a1ZGZ*WSdk25qG%#8|)|AMKQ0?mc+SVgX{ zXy=%OKj76&N>6Q;r;a!?Un(&g2Spe}&bBt(-Qn(==#?UC;MoxV1=MtYlQ7(?p~kSg z3<1(+t{x5NeDaDA@z@NhaA`=s7JfFRY00qxl_Mp6gS%CWl3Qv{jjx^jyIbK6CWW^h z^$V>p6)0H?Vo@Id8IJC&>^<#+Tt;*hRkh~aH$=wW;!V`Lzia=D$;EGvYnu=eZAY?I zGH#sZcdhE6StH?xViJgy-kZp`+&jwx07_p`n*N2j(`4q(Md(Mq`qCmZ2||XwJ4}zP z(7r(~4f`|t@bKX40YZaAC-sXI#fn5^xo?fK1-{CSWbC`f(oC`EN-eS2i=n&b=HHZ3 zJ{CHk)v&)hD>~aLNMD;QahVb7wXwdNWxx6d-y*+;XXZQ}g_LACb|dF+R;0jY#F_#= z;f(>3m@LAKSzs5Z1huX+=y%s18$a3gvFWiwE-gEn+uO+W;y-|2d`xnD(`OBzB+;B^ z%P|&CiX!nEyv3m%@^#`jT|qQ2bF$WbA2nR@|C;LGoGN6O=S&6Rylra~c$`7MMqvTt zMAyL|THivRrv?R>EP~@vNHNO@~2s@ zc6J?LR<)m>mlw(;m{m)XVD>wp**_|11tkFmc%WB@W69B)A3K zh>$u+jCKjHH*f%obY@FCh1K~Tlu)S3u zM^f-sR(A`^k6Qczal&kd_+&{N=(6)uu{i%V6!~4wXxMYfu!A`z06R{fqgSD<;@h40 zP^7jo165h2_Rqsw(UyYa3q^9XtUn*v_w3CiM-{SMc}4!<|2px< z{b^88#Ou%>eMZ3gYuLZx`N>wXdR#uS=NOMo)NkIKT&hS_{eWXwgqjDcAp*H@Shkmy z(-1)laij<_ZrpzXO_-cnE2L=%cN#zTV5O;ROvBh=UF58!_-we}$dJ`TXZF*H6GW?s zp&gdgL;>N7jHUj0BZU9gR^GP>y%*j{4{u*a(nNjyhb3}VdepvDl*il4PVwoYk3av7 zxZm7t{Afkdfd_n%yMzCSy}$Jy1qE2^l*=5b3PRB^$|=|WXmEd~hhxk#6ZYLh@okc4 zzcstR>f4#<5FrTRS>G(D5uyz#Pq#~wOksM_c)tk|0>`xUjrFp221+MBGcO9S^u(VY z6GKQtDaRy4U_jVpCj_n`D>MxJ;bc~azMAyQ*bK49!>i4LPO*jI4d0!;VH%~lA9`Lu zO&dFNeq%QWn1L z0^fupVd16c{HCi|#78GNJCq=4AUkU&iqal?&4QGl|uz)pvUd%5HPWVb^N$j)1J(LQfc%^hSgT;t$04`aC zdf~;vriSEeDB?1NBMTF)%#sG(n!O1AXXxVr`dJLSgh%`)LDKs@v$+QA=(;k#3J$pwj> zYT=5GXA{=@)wp9wE+blJBiNVmplmdCG{GI)&zHBDEhRiET++_v9)CfpW1Wvun1Y%B zlJUgoQwl_nBW^x!$)9(}#5Q?aQnB;M!*I;|Dd2PbASd93K6b`PD|;9+boCpIgJ9+B zr!(KxRL~OgZ<~l5aDmJK02QL~t9!in2XCIBx;A5^r`)AR5Dy2ZXUfLJ+@~AL-cPs& z!?Kh3D_daZqYMI#b8Zojbtw}^307=Md3FNR_mRj?D*0sGdWB1NMLaX{L4l+S#8yO7 zt)n2$j84+8we!G*v})5K*|$M%Yl@SqJ38nKz9p)YHRM9`?VWWB_>+w5mz>_`L*JCT z>zY*4CS*Wi#>G;|p4TjfDy$!oL}Z*5=|7=kMDOOWZb?s2eqY@KZc_7w3E7sn!2Z=( zSEJcSt<&A(maj^4Cx7Owm^iP4WBn>XIQYEa8T0q(MhG<>S9pxdn@?;wa?suiG|C@v zBJYA5rJuY{Q1w-^#fq_glvOkxALU=5R5Kn*djtt)YGD{p9^@0Z=xlG4342r;i0}cB z?;HF@Yt(UUQn=V$rdTmwoY2q9;szYnaPPC0dv&QLt6#ABimXH5oS!!`64T1aMbm;P 
zNai@CoI168L`B5{+Y21q{>G%;2ECh|M)b&W!Bfpvu5hoXtI>0-v!xSEV*z?sXU!BH zpilXbBagS9qM}%vs;Mg)wNs#l4q5wjkefDvin>+Zl@WPB9Q3V~TW=yV0ozIo` zX}Ukh70gRDHhjS)^$}8M4R(k^d(`Q~SDD&d%U3An? z^D5!79)(>go4=8fQb2*Xp=&O+S2XE)F>g&y9N)#;X}rO+v90I0$$dVNfP&7^*E%Jk zDBMZM%+OWDZX;q)THBzcCwwrp z#PJiGxjOudvm4O!mpbn6Xe4w1ab9cNh5ELoidZ~(9Qj9qgd(rPAV8#?HN3!QWLzj4 z#L+XATKbde>gH1QX0$ieq2aF)pRyoIw&T5^rItwGT|JuPUV&ZD#hb^$SDqf|nSK+J zo-zf~bVuG5T9=gLc__FS_ju$pO%X_Za}u*Hlv6e1gvrjf!Y|obP?01R$>U=#M}JG# zwOdH|m3GofN_kq(e561`s48-OXCPsbsFFRvln=6^B&8p%VrP?sB%rN~%s^sZI_m|z^74u)SqQY=*2N`-y?Hdnb~6ZSm1F>{ZTm7ywNjB>XXiIq<%itRfA?jZfu3N@~Z9W8=^ z>`|vWFi9?t!n!_f|HT%VY)JT{HDNjtW@@W?*n(qTIZGCg=$>e>!*eqYOtG$_r!U!; z&yu4@OA?5j5>zx4j#PYY!14+_cX%&1QRL*B3p%t@{UuF=^YjfdkxvSrf8)nv$&b7X z50`w66yp}lI~rM*9C#C1#`FV@$zJBmNj$XO-f{}Vza(ZsTGLj?2|c&${COZK{HStM z;+>DMbvX)JBPYO3#=}yaCmwyn#hjC({n3O{T;{6wTM4;sj)C0yxT3o63QfXi30f?L z7&H4E%ZUpowgW2?ZLmWRi0kp5Jb(RM_4@(r@sZsOpQ1g&mF7{$*v-QDerJQcg0|>g zL!?Lh+{!r-Y5{yq;OzmZO+GZnQq=urN`!x{mJG7=EK3nvs3si5ah~NOSvCdf0Tx)a zS;E4_K3g89j?&f}wCfsNwMF^Zgt~87P6nls4WHn;CMm^7SX>kpWv5Wb1=3>L)-3l( z{{<`|SY|Hl2|+4i5HFjH<49=T4>-Dcq3$rJty$1}Q}Qo42AxnzEHD@d*@XvENO=nF zX)kaZ!=fa?aVFHoT~<_iXr!AZ>LoSpc7(1Uo`yazo@NZ9Nh%~oM16=eV*Bj7aF=)0 zdoi}Zrsnk32)nuDR=Uvc3gVMnH}k4&OhsW5Ea^TuBS=AqR9xcc2v++6xA>4>@V?Sm zOnIBc6bAy9-F_P|*KhOOlQkjpK1-*nnGvP9|UqA~$J18+zrmR|Fsa#!H-k;w4O^rB5f) zpczlJQqfXuUKbAwl!}BCSs+P_=IoqA=j2^Ty1QfMXZ14N5IXRt|9}HnxmC(mt;t*U z0K1J*8NbrQZ59PH$%L8t1Kt-k#<_@`^AB4)yip-N0QQv+WGd&8POAB=nYXWY=)mGH ztQiv_m2q{|WYV**&BsU^wNKx;CJKnS!g4{SndgL(OyMiPw+=WxBwIXWN8OMgw_+$* zb{mOBqbg`I3LP}xL{@>`MiN`SHTgz&s4ZVwSlT5?-X>L=zZz1t38wkvUH#pOhh{?u zce^}hzSFy;xXNP(15U2`22YF2D*rY-ca^-c`>|kA1m%QNAeC?aXcHZwGbn9K zll^jh(EO)^0n9uGVxpamjH6h`^u1H5wOzz;)avxij+yG*LA#1wz-y3LyWgAOI!GfSmxG3{J@^ zYd9m$7?q9}%7Fy&8Qk8rFP z&A~1NNB3)@+oJO)lL)m#8et*7ZUl@a3;!Q??*SE6vowm%3z>qTx83b_%5+n~f zX9NVvIR^=XGUOpgkswIUA~^_3&N--LhA1kM1ysN{e*bsQf6o2?b?;m2-uK>GZ}zO+ z-L<=`YuE1TuH9X``Yc{eEL1(pwP{6kvD@?s4i1ffZuRz&ZZe!tTWa0dZm>u|(XlQf zTG`q^_jBlXqR?ND1?-l!E|~DivC}Vq_*e9Ye`~tWHpDv`vFe$m>B~3obc5P{elELt z#T3z#@g?7q%292ny23!Kw26~oTB;=L)v(r(>U3=?QZeft-)BDM=})p?$qAp(VT&;x~DQ>LMQlN*{ z#cGciGj6w^mpQ%)ZZ}q)Dhbp`8AE<=OI5ageOa&f8?bWkmWX2Gc32IT{K8Y$@*V%=c2%gIN{rgAT+)XO732ufOc+7L{3zn3IV~FjkZT# zDa1M&U(jDnnD0d`f1zMBk=FG3eZb&a}K#? 
zV2buDO>@-T-};s>>TR|3B6KBnET8*8l|J1jb#M{Y^a!v2R+G+RG4Vak_G|mQ2F)h{ zVjt9!Yj=N+2!3B#H|3J)$oJl#M|vN9FY%w_8Vm~L&FHw6 zsPaXyf@zE|5{tgLOrpQ4dJ9PSL~Tj`PpAkng)&l&T3pMmA63lkk60S8x|~_&|9^8w z|IQ~zv045-ryJP$r(YDAA$NZRAi=`MYKb*(|G#AVUnHJY3$WpSd1Uq2$n@4->;l*S zn@|1UdGx>6>^PM^^T{lEP#T|>79L*-Ea-~9owAIlW&TEVYcz=G0lRW|gz;P|gLA$h z3&WPf)T8$<6Yd=nWhpbj-DuL2xO#U%I@a(-agWbmy6);Un?xd2`AmNm<+jcwsPJ#)K?t z(&oY?>J+6`ouA@1KF{RW{0KK!l(=Z(Yk8;h=F5(HueXYr+SzYF>Zirj-DDP_!6?F% z{th;ELW{YAr2M7UmBHS~ot2vF_xz!IIKKfN>u0ait-Qs2Lur2MuY@eS>N5=Ti`P)T zRMK(z_#y+h2M7EaFW>)tB0W8wEp+qXq3Z!#&7x>-ZH>AuP+TGrt4`a@lckwDUEhl7 zFy_p%O$aXjOesuKh`ne2s8QXZe2c{)3qe;inE8O38ZQ&!-2UB@C}(q?YVD|7$8TPs z$3CzN@CQyH)Lr@-H;3)D+tLi53p-Zpov$_5v6%L^cvLGH>mi|%vvt0oYpUi0^%z45 zFUV&0Dl$7wJvNv7-hDldi<^DeQZD+6a4_j-Ba_8Tr&{40O0Q7N$C1eZzV|m>+d&Cf zOM35xv0h{{cl8syW#j3fThBL3{GHA$jOX#!lbOhty8wG##mnmGa{eAV(`T^+=V7{R z%X*9hoHU}wgh?(7JO~uM6ks`xg*H)v7*-f(-PR-G?i8o23q7&%>@pz~2Q91qftYz^D-09gIKF$^O zImQ%jh1@lSjh(*%cbcaXLyDpzBS+;XUySX)P8ZwSX2Bl5_8WjMzZNvt$4-rs>-@B7D{Z{tczHJ9t>Jj3Ci^*V zGjr-PEp+a`qA;+wCLl8U(jkG}9I#zR|Fv4f3*C3AGU^i4$RT?MWwWM50s2wj(B9Qr zX?7wa_S%~_q8WWzi`ugeGU<=q>nv6txD@R&2$u`ZRk z?F5dW`VGj}!ETqrkKHZ>go6vgZl(#uZoi5B0Rd#Jg4Qs3y`y7yLbRr?o#dRj za!#5)J}c{db$7@4HJ?_Hnd|7>L*xG$urNarjY$jQfrPKfNfT$kcPkH$~$HDXK_dx&* zvN!4y_T$70jLHHcLL7B9)HGCq-PhL88L#+I2;;z1qHC(4{w_k1_G$TsLSF}Jv^eF3 z+nO1@gPGb%Q~HiG_voz7vyzv5YBI)YA+I=o{0lfENQjI*luD%lyd$^@<4IkwHe=)4$|&9i2q_r6f~Y_kR*>7jpIRx>_Uz;JbIRBSfN=^ zM%|PPhFF!ya^gC1kj!D|I!g^Z34FbmabP08O+1~o0Ef(nbz#ji3eta+wxF|=?Jw%< z0et}qa^5=80-mtx4ybD2nDZrjD=ZnmRan=(5W(LrkiAXnsIa)-b}M`##O7;FT9uJm z)qAt5F=rz7{)P|@?*7IQ4dMPqDOdJE>|&6(Si{<;^{#qb*tyGZ(YBR=R>Uf%?{IXq{_r;CX1sETd|`0%*u zIjP9N?l#48x^8DSg0FP~a%V(>E(TJ~+|6w#5udAhK^%Eq`P|m3G)#ivD|#)EwT7qI z=i`_LpCUT%Y z7g4XIZd-4-k`kE&nw?_3FjH3?HGhn<%-b-YYB&58Xe#rPJM>BM36f#lw^?$SqB-g$ zhVrP!YwrOmGo;wsDeqfqh)^jgk63`kSBoy&&jgfH!VUIRBohuZiNU4Xxf}t4j&LLU zdxtp!O9Hh$;hZIjNYYp&z?>ZC4`Ukn4M*!V;hn`gZ>Dk>PVsITi^Bk_Pk6J}K)I0Z z{hTqTgIFNM*uxE9(APPCqwZz1(Ed|ufh0<%tsh6C0=hnXP+e6(4^b!!L%nfL! zeezH^Fidk;oL_@NMXI=$F{Ot_ob5ppXEfMBW48KRGo$^ndiW9Z8P50t>iaZJWP$Yx z@zK{CYhs$n{y6?Q+?bx|t5I;P?Fk^_y07#J<-7V?Mp%~KEj2>=%kg1&qrwich*G#x zoj6dT#vT%u{og6g>0_MIA-(SyW^D9edbx9@TIH8S5p90}padt*RjIlM*_D zoWno+t=X$8sY7H^hp-or;D zt!XQ3<@UY-5eBWw_sFff2X5-3HH3zC1}#txj2OZkp8kjW4M4F)0Q+^$-xB!a1s zu<$)iG}9PFYAqTO))0_LBjBk!Z*^V~?Uc_3%NO+Pg@FWnOX-9`P{0QFo$rxCrkSz= zIA2jXC{2K|!ZB<;7?;f9t^_QODVw3N-%m+;1SdAm+s}Ii35WoBhos}7;9?exCpy?I zI^pu&cxQzDclYsK288d?o8nNjlO26M2kZ)%AYlq^k5xKq9)s}{F*KIp?}`&t9-(rj z<04!}Y$S{Eyy)$E(Gf6n`eJ_pPsK*A4@uc|L>;L(qcQWmDsFpw91wX2 zG(t{}AwHLbZFkTj`P_Zte)#tS>SdT3s4<9lhMtU(^_0MB$G(am_yL8c3!pT83P@a? 
zSLCS$GfF&~>0qg4O&hOQ3W}sahP4K!z=xNis{;kz94H`CUPytK3lPq;-~U{Z5w%GcDBM{VR*wLaal~?tyjzqg9su4*Y0{=F{M{kQ zy8Iu%(kS0VD zmMDp6xAHbU5y#pUR$!g`c}6a_!?|YFXolnf|4xzmcodv#SUnTbT6p-mZe~!>nzPEd z(yY|w**^lqRDe|fO0)WVKEqe~{|vlSKm^>$bJ(UKsf;Uax~C{30t2Id+Ge#k63LpA zJm%R$+iziqm{-qjuNge&UdA~%dnvsdfdRUhQmU$$}mIodS@AN`oD1QBsCf5wB zh83BDU4fAGp42nt|H>@-mq9fAGpJB#AuKPH% zZJVD`;J~n-Fk=V=09Z!6#w@ZktyLgx;m>!cgH3Qa#195v&FQvV0l2?S!i*XSlqpO~n1~LS# zyY-~6(2R;meh%wgt`jEg1?>6qAC_6E!JZZqM$>Q4foTI-O6lgnwa)t;X*p%^Kp(#YZG`c)4KBw~>FecvaHEtWpX`>kI5ilk`!%$1y83vyME4sQzs#(MJ;t zng&oa_cC&1fow$->&rQn0jTe-%B&CN0zjBG9S1L8)_O{eXFeZ*0TJ0DTUqN zsCatp+d`2Wr?zO6VML$CrN5WC6-_vIIk^>)L`6S5jfGW2_JEKi49P#u<*m+{%OA6% zt*9J46XiKf5v`DGbK1Zhm@XBkQLXJ~zc zdg*hXGKN=5J?Pd|^#1YcYV}jl&mX(EIU9emVX@;((4yT*omQV+uWoFMKFo|f185B| zXbFiWLN@4T^xJ|+B6->=&B*uEaRs(H$RhFC#BmF_B8!Fx`iV?#4S}509d%vn@P6U_ zv=*n1Y*Q6~y>8cY$Hr$mlQVENG4dPkL1#)6r`Q?~Zn6by+_y30{rMM45s%`a_iqy@ zvJvHEBJu;G3VCWRozV`^y5lRja-m7k^%zB4_kkiVKRZ-~>t$N_gK$9;j?=hp{Tgg2adn@c^m-hXX74fQj-LdqXznHWQJ6eZ! z1iVMe7wN*%HeyyqWP1h{?(?$Nb88DcQe8o9pegeWp9$JhQX?H-pp)QiqpGr5nTj$DgJ%FZP$0W1 zIWKj33~xt{&2IpiKl|-Zd~~*H?!&6pJ|uC^J3$dB4m4Ub-&b7*Lh<}*i8_=^F{8)n zUil@sdqf9BgQC_-iCh5*Bb4#at=ARhwpH-cax=EJQ`Ba^5cctegj<_z>K-Z+9(s^U z2qI$?Qb9P>@;#AtY#^`12)SJ?3C*W*H0)Nuj~sqbn1hyt$Gw>O!p;3zrD;w{_@z8; zBISdWKv6o$LJ(Z+yH*y6lF~_#M70j*F@v#}3i#F|l3j2mqhA{rr+U2Vy)q*C_p;p% zi#m9%-PBs)B^zO~LLd%SX-nPhTb?4U0~nMTs5kG`5G^OB;Y$&rm{dOT3Hlz4G9W=u zU7!by;>aX;ucvpmuDe@CP&$13s?7wfWzKAURQGprcn{yo-*() zOYDn}hMnYW7Z88v(qzVY1!kfZyVP^IFbQrN0o65{Lm}f4FCGS`Cb`(H&B7c>IOTDG z_mPpElh2692Ct5y!ihTb=M0a%4U4_wQ7J;VcIzD0Nun`0kzXdL&{ zT54#>XL{38k3aJMXT*0lD;_UN7qn)r)Z?YH)Ka$Y&AfS3?hhhj1z&dDSr$HSm@N73 z-c@DlpZA96unWuJ49n9~|YhPuJfv{?mU65?8wC4OM1>Z*TKgpFM7_fudytndkY z*_ta4axb2ziWH9#H;QdKt(!Zs59o`2q+H+?t$*tw73?F0BZCh=geVjMA=(x$u1wj+ zcMc{{93lI{{VXy?x__9GcYrf zGo{IGp@uALE2HqVIGmVnR@76g8$f%5SX{&)zee)Wo1)>mUn{g)n-!|znWQ{B7V@BY zjUnZHvv0x3CrF@$h0pGPq8708k07asqHRlXcfX*v_A;VqeogXBFIX zawNim8Gh6-!iT96bZ`+YJDl^~n_yd^y!~rU2^xMUH&9=gZ*+C)&id5@d)iQ7!W$-M zBHmK|=nOOcVP~a(L^Q0mc-{U6kCGH-0SL}kfa(w zb(YfL@*=uqLdE?W!X+6ZUBIuxa(B}}HD4zpn~txB;Eg3(#R(?PcI4(S9Urwm%t;77 zk^4bCIOC-)I8tZF+1o0Aq^O9$wL$frss`pzmn!P{>a5nH^@hc3-cfu;It2mhzkyOsTGK|dB9;8h2i~3uJVE+tC6s# zk#rjt0}W$*H2j9FPCSROVzu3y2f|bo2@_lcrcTRc^QAPNqtjwlgDp(IGvVEBC*8H< zkZssyr3LFLew?w_-}~ttlv6-F=!-Z>7&#&kq6w!cvI<%W2Lkb2IMqAKb9yda6BHGZ zsQSbae5=zNaj07Y?5AN2pss{M2w|Q|G(jLqD*)iS=ioAr44B$2zL~=ad}aJKZDxJc zRJ`FF(>q5RRmMW3Dnp$TavOg62OP11;EFV=y;1%pA>3}SUZy*D6HM4jKoX z11`lJ0t)x1&z*urq;r@jNd03v6jlolrZ1d=o3Mayqz*BS3XA^$EMWovk>j65{sB1n zgQH1$35(-Tk$>X&k0O6?{8i*nj{huzt>v!%;U%Wd<|V`cQem2TO`^Uxg=A@PqgGcZ zzirTGA4sr{mY2K$>2 z|3=~8H&s47eGNPZj__o~T}CXDoQs*3L#DZ5l9yf>n& z`riTTQ3v>WXa8@Z`0wbb%=l3ip*!cpo#Hi|xPilZhoH3YlUyt4EKIq+SVrkhbYbTziij4gs(k;?GO)6yPxKZKSxUV&`u67&i%)|>i|lztU!p*kz;`e2p^xmfwp z3KL6uR$r+^l|1>jle@vu$~*(Yh0`U6)pEMMZYh{yH026B0;gw~?!yaN+HkICN>T6% zlf1pt%P=sAv(c$utbhU!)g6kb?esFDGAo5gf;@RT7e%G^lTu2sEl3xom`cVV<`u_} zlEN9C#eY^p%BRX~oFerK9%LqW(iFDi{BPg_uN0Y+!Y=V8GRkWK3<>~1u=Lrq`PmMs`bkV_kiu+dfh^G|&QWK% z9B$k_2LPk?17Vsw`yHW#+D>_4+`JF!g+M$LxFq1pF24^z@u2A=MTVhq1~vl+t`bU5Dm>!b3Q1@cczkBB!y0wvBMOJ1r=$w=tP;rhmSjTd zSDNfcLHAqV5s_|JG0ZE>|45b+Em+# zXH2#*ClOzrgIYWJxR{O^ZRzDm8^uP z-0pLe6NXGtglV=R_N(nk$S$^D8d|1eFa~T><|MsXj+8%G{So0Fap$drNo}rE;5WzZ zZgx$80mt_A@(c|(r4QCj6iy1V5}mW}5>Y$ph!CeSf0H*{k zd`~fOnCl=s63wsVtuq%dPjmn14bQhxWhZ|aWnY;-cB-0^MQ)mDSQ!c?68iPQKJIf% z%xBT>aU@NgRDm8G*ewD`eEN3o^~e~hj5cvcJ^MrF%RsY7wr9p{CL?0+@eewwhIiGU zB@y{RWdaoa{O>=%%pj(*?oyRwBXd&2ZpFGklWvV%t z$y>4An6NyzA#%@JlAP66bQC6U^F|@^QX4EJLcQJ1bgY%CIRZS2BH7rE@rrb4yh9{( 
z^!JJ2rjt-gSE39o@k~~J3B)bp6^e>WCv}3=37pt{R@fN~XOo>Icqb1k^fjlbM;s=h z-iKPCO-HPvsa0aKJGTF5yLS+7<_{}ZdnUQo$|4jM{l$kkSbK3MAW*fGxH%XHvlEws z)&0~D!IfF&hNNOgt)@@D3Jv%zE1bQIe+<^eP`s%S+RttPq;*S}D2MHm^8<1aK3-WJ z9)==v1G15<$BJBp0bF5ohB~k?7=T=Bq)R*naAJ;=fl*_~YLMZfz{B5oi7BkS40$EH zo}l6g!6;q-xR4Es9uht53t@qr$8K>#U5^EMKGZ8wy;gBQ zRM?DnjKeFv7Y8agyxA~cE>8sThJEB8`q{|xWjwT6@TTUkXk0L!1Hw*@xWG=NzL6A_ z4DO&<_fu`O3~cx!-TSU7_+t&eOuX=brI2_(k}wcY{JF|cTyCzRjTsuP16>XpLiT=b zKP2;>C`e)>uqPqq6{Sj;iUVz$4J6hp7E+9hH{a{nuVxK;fHwBd&}v2D7v;J`5XG$= z`D`HMcJE%{qWtNs{MPGD}Y&@NkIcAZGKsq8*Nnl@#T(pLkNbI62FiT!WWQn~{B~Op>!NgN zSbstt#2emUNGgPf;K&9`a>ox4nR-`3!)oeg1d?e04^H0^qpRw&TuG-b-i1d|vzQ*_DfwJ)MaNGUZlQ-uD}yUMdIR?Zp8VnNy$qoENAV&%F~ zoX-VD0aQKhAOZo1qf(WCL2kt>8t`v`xJx!Ie>f?O46L(}VqJ&Va3j2l_R!Th1qTxh zJBvT-?T3wT0I9#DI_-q`U^wFm6yY0O-@*-$dr~hFbd4QeMUHW2>@j_A^CRaO#vi9p zDxy`>B`WAXB@?xMkyG`#gia*ofH3s^lXm4%dE(8m9t8|`8&jWJw51hZ1Fc^%PZ+*@ z0WO6J8dD)07E8t!j!}WsEBGnx5UKtwrbGA8Kdq%$B} zuHCr1^CDl&NNTrsoZ>CxSw0@Gw-B8^zFf%%3Ze}#5Dl`vB8{e?FL5;7Z-@nzin59O zBejvjtTW^v=#}kspG#_M*A^yUs;)&dGywL?5>z%U@-^N6 zIcB9v(Uy2JBK_}=-tN%fQN(OeSBZBn;{5G@^j&N z+WvP|%T{BDrkT9UOFq3fnNJ-icKdc@T6(wgFfusceJe}i4;Cv-t0~vEk(C8{+%Z-o zojlJxZ=tGNTcx9c3QE;!NzYO8v;YG);-)g%47e!4%b|??)>S{!N^TLIagYXJN7>94 z7ylj@BaBBtN#(KDMYm^eSHR9*r5Vp!sW z$qxCqwREU@FF6VY^n#!isA6N_9tzannPKieutGri}r# z#1R~sX}DUWmF-4ix5{=5SE_QK8TeFr82t;V%UP|Pm(7ay52acK)^%F60KrKs@I1*kZr7tkI|HCP9J{4ou%V)*h9 zv?g9Qk3s!VxcZH*f|sR!;kIUY=}!b zBd&KiBskSe!n$7hV=I5xGfeM*8Psd@vU?g@z!>9rM8eB)$t_xEYmi0jxoWLxu0gM3 z3%vtWc#^_&|8B7@`K1m1qqkp; zG7>p9Kua=Z17_=1RSNGJ16bN{MPw56Yb)~0lY`_@P!Rs{LK@pVJaouP6%1g6^_xZF zfjQYk$u&~zB4!WZbQ3YvHe6i&7FOMku3Yg*wumytUAt|UF+MaG%OCEzO|LP3XE8XaHdab;OvZNL3DUoGTc5;G!*S2tUFB=GBPsK*ACAc<#aC7FM~oAW|Z-I}og;QVxPCzn{{xjSXzVPuGnz z*e{pz{R5N`_kb1K1YK#B!U2%U-+;3!e`@@P^Mgv3C_L?895{OtR5Vg`bhFo-aFGk0 z02t#y!izRdy4^9IVfgj~K2n@S!?~Lps&j5MkE;fgYB|)Q;#P=F<{m)~TVWuLmvquP zj<*l1hy#iMFGfqt`xl3#bTM9*pQQeq#OG{4A}_)vZI$sGQ6u`6gajEw@ME&xXc^+P zWGmt`3Vz*0!AgpKlp0Onrnb@*f)%81vLMb>nASKTKUI#NARt9ma=R$|Tr5TaJl{Q# z;+LMpcTKaka~Fkr^9~Rk5#@Mhys^p6aE$O84G(xR9L9PgEp#qM2nHSF1htEwMYl^4I$K5F0X=e*|d-{YA04FEYI=BIOEJ#;+OtOzsV^KCqINk@o(TAVfOh@s%TBJmC{ zPA5tB6Ao5fF-bxim#gP?1M7f<)mp;)+v1yfmV9JefXF!#o4|Jji}I9jqD9N1z8FN% zlJJIDf0df0LqQ534Hv-8?G(Ly%~uh`&3UmG@M28 zbO}6z#&`FKX=vo9QF<5&L8%q^XmI(TXFDE*bIUu~fO+Ki4{pg~#r#pa>Q`qy<3bm0r#!05Pu*H{bINXM{u^(F&99JKCZLWXF$-!f{=J|c{D z z*Q!aB-;?CaO^OtTT4@|}#ZnRh$Qo4FCt0G;i~(%ZnsEXpuYJ@v;!Yt}Ufjlflb@(Q zXF@?puCQT;>p*szd|!?zNsxysF0H9ixNoyECMsnuk<=Sa1;b}~44j-fw~rZ^_u=PY zrgaldxLaaCMI8GI$;>&kaIz>b3P3>taRmT`6&WC!2Y>+Z)GBNi6!3GPLfr7gfx4n{ zJiOiOc9djkEL21QH8?UIh5ratP>>bkhNZEfR_?Nb4G1-f1Xuy|lnEc>+}16?4ABteEi4hlC%7!mmKL2{H`p;qaf=Dj=94%GM1sW zi}qil9_*!!F$`h)1ylF}Y$oHvoiKPGES^1i)P4JVX4w71 zgz%|`l)Nd|0C0O99=}kVA0;`f*Ka@!L0xRpW`RSJu2OeN+Tmm1x%?jTX+Oh9USwZd z$R`-_mG0e0i!=m}H1a}NtejB!Zr{&>)y@E{qeh5vpGgQ0AqBKTJ|&=;FujtaLn&P# zhE0xgL=Q?l8C2;y-vhOY*jY`6#d#k4=-=$^WrUC*{FmGOzu`!|5Z8AS@{EWT_= z-kg+;m%8CEzHCh19F>jd`4h!Lt@~M9e1m=i@Vp+pg+0F07qH3LTg}oBpoKWZH?9OV zQ12t|aqHCsNCzr@nkAP2Y`*;Z#rCMz?j!5r^Xu)uOxu9~5C8`c7xy2w?SGlJ>%ru$ zy?c(}h_J$XIo;HmjpKhYZO6Ldoz1X_(-N>R<6u;fCkBharvDMB{afnaul+Amv;V;I z-{dspsK5Mr0#*ze7x`6xz=Nv)DNN08Qg1U5|IdH}c})G)vb0xKd>c*hp;spD?LkdB zFcGS{S9~=env;C0cjF>EqVj|1H-M96`(I!aDnBZ)Uk?6(WrU_BpZv-957_SC`Ef1v!AtbZ3*|1ZV$Zq)zVsz>Bf^2qR?yw3=N^XeIIW1fPH z*)NbRsvT>|C6vCo82a!^qEr7skA!CbrDu&BA*kP% z&fD+v`Rg~Zr58UdI0+wZ|4 zUrBZaal*U!h8a%kWiWb{@WXR8!R45>vLJ22#`F$(^^-vL#m#iWZcsCq{h|RRjl|{E zaOTptn`C)%E=rV@()k9S3L$t~yDy*61E*@%9y*iqCcad&S+D)`Gl7ZMj=?i$New*j z`in=b&a0mP;MxfWCslmh?8mtkZzz+wTCP1hYrr@Vi+}l)v`kwd(v{VY^qbB)t#C~g 
zUn(K`3F}heqPrzv@&N*)jG!TEs~iz{cF*dR06Ph#y0)4?h7XUpydKG_~C$j+YJUBSL^=AAqj>+K^%A` zfzs-3|8q^yZx&N^h2n>X5kT&P+b_V7r}_PQ5vqk_P=1*7;VAr_i16g4*wsDSoy7H@ z;Ur9XG4%z4*m*RmHw#jtBqi|EOgrumH*Zq?s+>G62OCzsugbsx*>CN>78~wME~^ac z3tCRrTgUHd`Gwq>F+YSHh%|9}blZuk`*T5M55ziB5fi`{w9H60AKy4HEs79HRVI*y zeRR|<50a_o>SNLz=96YlgY6MI{`>b_w5F;mo~yLsZK2v?hp&5~8Yl?6K3+1KKiiBx zm)Kyfp$`lNfD&Z@%otHS=!$fz3>twO_OpQicsqj3x{DecJ#S8bg9>#GTr-dj(jZ;4 zAxcao@IT>UH$%|=u6q7T1EtP`fb-$O?yNAu|qNU&J#*)v|C>Tg!;Hl^K@qJFSwPu~m!`{7wFMPy! ze(abi#I4blX@4qxHc{adp%1sYEK%>_DI)q`4>!u{Aqj40OC$e#I1YXSPH$754-Avy zh8ZfKAM$rEE-zs>b3cn!8WEQ?R8%M&{#(mT5u~E zZN<;BK>WSByadYdcLi?M({Wv{L?NjfHxjrSs83=6eg4j8^H&=io$kkvFJ1(Abkw80 zT3+N=MwZlAplceXCRf8LEZ#c#U={eQO-Pe)ET_}%@r!SL*;w{G3D@S5G;S)6q#NuD zlfC+An^XsQ=6h&=IP$w8a}|IRVV7qNslo3%`sjsC)xZr^47s)!g=&eVg}Tshj49rG z>%Nl2{^61&0$`-YgCA3$ZImw-Op#7@_UFLLU z4_<2st5M6>+FEt;FQTJ^am4P=8R_&p3rOb=-#CJ5!FuF^pa8^(;H&lxKqUsM#109i z_Pdp%--*slHTDAL9*<1!e$V9yIIT5%t@*cOlF3bkVk`2+e=eBy`@7A=Q4sEe&9(p1 zhBXI!#_cNaAerr1FmNdFCzw=Jh>J!2BoDy}9Y*8DK&69|hwrAu#_RnGKhDj|R0L@*d%NL( zs`$@fOXG?^G2me39q*n+->V&gSk{mlLs|6!V$2h6iu~OguUM;U!R>FsBEDa}9a;~e zY$>6^NrV!kJEmDIa4NhGwR+MrGBsX?!cr4(tnO<+CVE?nifFjGlZzO`s9*^L5w z7kkLkOuC}WM813Xxbc}|GE=m(smWpGvg(ES2~`8IBXX%|)v6h44O_9%H^hc9K36ZY zONPUOsU-Q?uDsH%cQ(E>?Up?cslc_vhA;dU9&!d-rB;s$k&+7USt32;7aNb?FA#Jb zOwF2MD7(b?w0s$B>dZTHi69jkM!oEXB-@UM3}fhdUDwZ$^*IXp#~XJT8d>Z;eeKxE zG8?AQU4@uJ=cm0R&Mlq}*)>c7t@Df@txRc8o69Uicv{V&-=_);{-|4cI0~- zi-za$X=0cWPaf|+C}HJzTd-zDBPB6AmB+j9C#%5>ktvbW-=#W*=As~Z0 zfwkeme3he}oteH4aGbB2@9XOW#@Aq3AS5C}N=V9MqXd3tB2NV9`G|vY(R*A5*bt41 zj@^8JkjD+Qw0QTUxCE5*MQPlhD`tI!a0os=A((zl+SIqKt)HKVDkw0?yhEoC`CE`4 zRQxsi49%eG_SbDzwX{5SZtA2e7VpRp8pw;Dm*t<&`1#t!O&gDfYm>n|%P>z|?ay2c zR<_PG-hnO;NwOT7t^10QGa6Pvxk|+X%WIVa9yg!C;=f!En$pGP_!KY7bc{dEIB`p` zz)L08ktQgq{_hZMWlsl1fu2kIahIX4E<+5>XdM~>tGtpDZ_c=GvoFySEVwXg8XBj| zjy2@s0eYXNL@yUD!T;g>Cg2%$03!6++1WFWAv!u56mxKrR#w}|bc)X>pr@Cak&%&? z#}-4QJVUDhBw(aA<<>cWOonm&4uh7|wZ0JvUagpMB*Pr?Cgmp=_926eVN>gGy{>0sClcEs|HF!727S~>7b_wc zNw3t_3WoJ1_?*@Mc#BuGaNvqg@oCDgeo2dYuCbn04+PIM$ANGONa>r{)M?01`A7rYYqT#6^n7xI#X$&eEzYoP+uI1HVHO?I{NSgnk8}yY8Bg77o z#Erm_Ro{mHu#3{o6ahUXE#UL|d;Nu!d-^teToPBZF=`RV5KeO@&z3X!27@}*G>a+` z;VVd}abuHcAHu)6=FaR{>CfBL!-OmxbS@tzC*63Sf$Q*lX9ts%|1a?K6(B-~xPWAk zF`Aa0?Kh(=AZ2*&`FU>t!VlDNM!LF4kdcv*=|1?3Q9khJffxeblc1|LZvYDEFZ@~x zW;$hrj}7*tJF6Y5YBG!c-N9Di^z0vQ9UH5Jf_PqI4*7qnsBo~i|L}nyi8j|cM}{V3 z*C$)ue2oP{72=1ME9Bo)RQYp;qpRAfp`fA?#t;nxcYC6ir~etFqP#{ku_%lN`NTbl z@clh~I6DpNo1gdkfhiVIm=nkXT)eP2kbj#V%Z+|;RFJSYn&K@Ob+}*R;*>J}7bA8N z5}Jsk{`rUk!>vzKEk@OjL-#B%&seNMc)Yo}L8fNvo&ps&MDp3<4r&+M{`fpQ097tv z=`yjfC@LuhY=$Dabg)1^GBN|3FmPY|ua3ZM*98=CZf@Mt@?}%v7u%W*VQj))G{90F zXxTv(j^H3{oJETuHkSoeKRFVexz`y@G<~#V_gE%yMPs^0F}<8^<3*6cv@b4kkq27GyZ7L6*`> zCXdfvny9g88+1~edDB3s)M*uPBx$WMSZ5GnWrs`6vmjP50b(zaVJYk?wU}xBFGfI! 
zpg|6k^lUW)R3h;UPa;tSWKZnaQsv%9fut!*kg~bspx4|U|0II0#!A}n@ zErRNJD9NIU!953;R`KKu3|!bbo)?JZR1m4_1&Iyupnz^6`u)edblKOhVX4&5lV}l~ znDJqlV41L}Urt4uRQqsFO$ zADh;edr$5IX;uo4_5HQ_lsTTXB%E3g90XFKF&&~8q6nw_5(o12w)1a2^BGIuPdh2( zDZE2`cq#>mEDroD4f10?P+&C35cDjtsg!A!Xn^t&HUv1#0Xn8hwO6~9l~uKBfH>u7 z9lVrBWl)Xlb|w?ZM&z%ze=yI5{Q_+}av@Nkfe%tp*HThKhk%*f^+V9TwS`5I+@~cF zG7g(JOiWFkYxQVezI?V30jnCYR$%%{e9bfDirx=?e{oqErKpb!*!_b6ssRYh3d!OV z#}|HL6q8^j#}===6YT;yX)gV}ZEMvZgfsjq&(kdt7nfzJ# z8y1N0&9Up|?N!3fxQSHZY#lU38FE5>A}m$%V!@Fp>fGlIe|sLTSQsH)gnj*>^%}SB z4#rC!rCXg84h6`XgB5)-S)Zg)+a_+~1IxxlH-^>BhcWAyXqyM=0|Nt~*j{+s%#sF8 zrme55v$|jb0r#v>jZs#;1GCThyCriLi*zEQiD>lk&fv{C7@9rZ1BLIWlXnuUAf1Ft z{Cpz^>XEH~o6d+BBtUY=h+S7fLBY@O7Bv4rYl{!a2LAZ*!)wl@#f6%k9rI59wVWJ) zHUxr_#;2N$W4TO$o|-)gVJ~S@<=0s_q(@CI-eB`S ze%y1@5hg4abq-UBd{uys-*$iC(SD>>-2gQNvWZ~ENChGFS(#vfk!(=qQNpVGc_n}( z4F<|^rDEHRKe&6Tz$p>bFWrMFHx@iNl@1dQFZbl6q@>ng@_QeGVkCCwUQuw27`e8! zlEd;uP)D?NyMlpvqvwGR* zv=`gIyUqrmQoI~EbB5M=_wxNS)@#jRnnw7S$c6LX!%0GNA-gPjcc>qZ9=U>_dSCt~ zbb#1t>J@XXnt8YVy(BP8b0`VJ73{U$6(U4_06ORUlqBCn_z8y{a)S^0f@Jn(E>;JH+roz=S6^zdkq z`{XJ}#>dAOj)?Kg*Q82E0)T$`|Bt5c4y3w&!#?&gJGN{hBP$_$6OviTmXWFEtDGlMxhp&d5r$tbU3qwQG9^Ps{gHq~}vcZrw~3dFp$R<#{z(F}1E;fV9qVbAuR z%|s?CQxg-yXv_pIMe6qgYEgsRl$~lhqclX)F859PakSoCPW&mWV2`KINzRuF(z`Mq+^=!WaLLUiFW99VYfKaU|Bg^MZ~dSCLH}BBBXnFQ zcg3DvU0ppp^Ub4usH_a1ftr$X>g(5_-!o4iKYkoqYcP&r2wr~n?&Zr3sCgZnof%Pbfcd+O^)ZQk-4)Gx0@G3|X8N?>Dw2}667A>?ul_M^GB%DcbLav8jfl2Xdkg~)s} zKZ^DBHQjx|B(a~7-S_7-0jb+k<$~QAf(lj1`-}`pdT-ps`o5ppfDI87+tF zbmX(}JybZWe31BOV-{SfaJz%Az*7p!JV5Du9_EhT3<0-l@VKd~tE;SZ6{SZnj>E2J zMuMNdcbhT9?nKF>Xq{Jo;eg8g*(xn7D^rT2(x&KdP(xJK){^`>&%bS{49(Qw_s=6E z@I&}{cz*eHg)yI9{sV&>u-=q{C5+$l2IDXvG=H|~l&A6cfFkWci`@Q!L`3Ei8EcYS zLNe9?zPXRQw%hhyhr3Syf*hPur>x>pD*2{$Wn6l)*qfWXj1tV)JmcE3y3)hDNMdcx z;ZNEAgI|gjbmD2vXX9HWuOaaXB7baCrI$#qm3Omw2KIes9PP$IVIWQ1^RX`alcv|e6#0!9 za0_ELW*XdgD@ohqZ&xd~cXnQc0XZzE)zQ&m3_4qZwYI9=&5ey)Cw@6E1ErY|%Z@$t z1D9kq`?3{SHgeb~qpoe5l0a|D>)Fm*KfWRf2?6ggwXj$Lrp8^TSqjdOxvq^yL)i6a z+5P87HSB~gD=J!sY7O2O2-27kA+V+0IyEK5-ob&FYVhd$pk}_f7qt1%{A;)fXygdl zjjwY>kx)L;GeIG=F}f}y>737C&~w-M+b`HxOTi!`m-;hLOl-pa<>vZ&M2>thZ#s$< zN!o-P>JVtkf#GJZFBTJ86|g*orA^97rOlY>SXTnVB(J=l&>e4j&12KZg1@|3+JwakGL- zk~nw$7wez*$Gh^KcTO*^N1lsee!<<-Kt&YJIc-oMOLC|R`koLvk+_^KnQLG^n2>RJ zeIs$8bjsd;rwa8=!7ptqR2=wuE~j^JzDXPkR(0X_;*DRP3(*ky{xSxqH+aFSH=Cgx zXeMs5vs>HPKnI0z70(wn+J1D6iWirt<#sDS8r2&t$~Ch5l64#SPqQGTmXqs+!2l4Q zrXNTF2Q7|@*IhiHot+(>5WI3SEfHwov`{kcnJ+Y$&Y z6Z0Hn2v&IzLpLcS{6$fFtHSo-TP4de=Kw{iXBgW66=@POIjl^8migTQpqHo3uX*zW~ee6({4$dNx5x=Vbfx+~_!2vTNETmklOMu<-H=pOyE|p|lsx*Qn8(OJJbMm4#?1Uwq&z z`Hj4o?u1TMV%byNE@y9%g2&EI4iz9>$c#KIAWn&l#Ld^Jc*xz6osyIjacT`KsDml6 zb1rpi-E&N+3noqwng7A0<{BX( zolH=tMgrdx_B%v&r>pU4?klEvvGExcwg=CAhc}NqXu|GB*$Lv7Yvg1Jb9=a6O>z=rQG9$sDX5?I zRk#OoH0c|nkNAhXy3)pyWT#AL`!nx!tcLsL)Nf@(aXa0H3UhN<@e9&a6DO68>;`f z4Z>1(Yy{jWA)(0?SIFa^|M!CU1Uwe>fY)HE!Ah#gj77*H$YhI?4CmTIQjj0t&iQF! 
zBM~Ch6!hZBEvKW4e-$`13*SByEt;~hCr8ppmqc?$GLt^gX1x|RGXBs`uu}IzvEtVQ z4W>z%J02e^Nx2AUIc;^jYhnWWzf!hSFX&oZ@kPDRF8VH{A}GfStsWB(QGn?_0LR^b zj|LZ=rQyy(3dXw;Qlqy6D>#pikNqFX3^w?6{Y$vJSY+3}CTMOq`QpmS))q8V4|H{P zT@p*0n}0(}-BbIAGn;z<{(ba$S^RnpVe|GGplH3X3#6bU2CAo@Q8zp`2_NSqOkbEc zK7alk0=|9+VBJK@;+P>5+g}C;fs$fb_X95krWruYZM%o@TCW;7&v1rb2tTU(I6V9g zX0ZN}1Yyiz&;*M{V9SVq2VwV8H`Mgb@r^onIeB@(9WD^`U3EVoe8k1a!}?+G!~zmn z@Cy|LRCxQ3E)nK}*&yJ%9*=0A+l5=Q03+|y{xWjsCd0=W{kkkRZ`+ECSD=>zU5ljW zmX?(jTfrD0qHtD41MeS1ZWYo!`@$B!y=$0W%XncxbB$KW7M?w1JTx=v?NiNDOd5oz z_`+JE3?K46$j?CF1ea$K7BQ0EP_BGf)zEIwb$NB{puu5grl#{%4aD?u&_f&^uS386j#6Dh{OK{5{}N;shdOHdBKOn^ro zKBHfB>Zt#(C#c&jC$Dc+uUrS#Y5gQ&_~vN*GT%y=Hlfu(|HP-sesbH>yDCV6wkTe? zRJnB9sj&u_lo{6HpWdiXG!CrYrcE%poQ21MB5Snt24DbA9z>E)R_}42{bZR>FCxS4 zwQpjL2zz(~l0|RA?QHcDPJ*xy;iMZ3oP(>MharPp|QY(o(4yK#s%jVqhc+C8a?{1n3vnkCTU*c412y~V`JzT zvlGWV_e}$~406SeZ+rHhox?8Ap9(97zV+0e75(64uJh`{4hf>?yZfuDAA4Q5*`w{Y>zGV!FEA(AY-}vBsRPcxXTEv! z1_*TkgTl%tSvxzff<&9gzd}b5Tldj(IKk^^A;TVMPm$yH=&W>G@v6N|X`WXcm zschY*x(x-7;78_OhbQ3!-`$%U8xbK>_UZ4o-vlKNga;b?OJalL{KBs2i@v>8Bum9a z=<~2U|DOx6?J3^jLi0PGcmt-GF3!%8&Dw^iCzjjgW44YWxdUr&=5}clUgipqoFQP4 zeqe`BOx$F^Gb%DRN}6GPNJly1_RUeG)3J4+bXk<7W9K5^oiks%a%u0aq&Hh$ZN^fI za=t0ZP2uEDw)X9ol+#w_(!+~Oz%u2$-R+9sjoJRRCzrg0lX5_W`I~KEhvy(5bjHz! z#^z%Z%i4bl9VcInAm+{0Ca$=}OFAaEQ6<@!LvDqxg_4{+H7(8a`iaWrGssrDFV(p? zIpOK{Ks_D)m}jEStov!fvq+09mv`qI5K;b{_<+!Q{CGw$BK}1Tkbq^BmHI0m&3^gv zMLWBceT^GKht-59T#0P}UT*o{+scmujIWaWCeYMe`wxil1}}z}1K&MFf2yT( ze<&)&iXRRUzqY`0!(+yrTmUcgHAcn~o1heAlqCY9zKEzO4t93%l08Ayfa={?Omw|KWoG z5jhnVR9ekas~_Y|oWgbnsA~rl03u0%UG9mAkQCm|?MfGpm zT%0!dg+CFDN<9J0?pqPa9a1YFHy6lKe_8uAZZ+EMTeS!e57Ef9C%F#+QxUlJ(@-tc#=X@-6P4Cr%*GN9~TPgC^u_4V`=Qqp#Fb2F@IzC|eVd(xanxkfKn z{bsq&bq0ooz@Zf8yR->V4I^1n*h))E+GjrOpB_uP2+$ZB8e+?Bxlz;5YfJL*sMk&V zSQ@q%){L`0tpS|Q5S9%Jy%l)!AsRv^zd=3o&6~2-Rgqg6u~W91TU9a5%DFU)x1#QhR^6cHbf zeRzDzNXln&I3~_DLrYkEa)lWZl4QnaGZYxl_KLdTeBm42p9X}Y5y}G_kwQ%zltsoI zC7OvwvZ-wC?c0H-3{~0tzCJACez>79NmVdN#+4$Z+x$l)=CV!87PM@6o+FZeLI!jsM(Bs1{b1>|ty8Cnd;Z1@)RV#-Aw{rZCDkwWiDL?gg_&>O@{ z=l>m1B1i^X>;T6d0W7VyRvHQ>^!HXOWqIQ8#^7t6*%i?3g&}z=EC1eKGOL@`fA}y@ zCKxm(-4&caV17q09Uv-)!VR}r?0g22e)yvRIXO8!y^LOSTWSi~c3~hU4J+b?vv6^3 z!)(~>&69p;Od$aSHX3`AdNnyFg(Dw`yr;Pm4 zx^C}2?;Tc8tq>$ETKKq|w&;JVq@;C<9^ZC!els1qeAhNRVlu))0hdH&=gzIomuEDT zBtO+L5zC%xY|J|2GrKH&=c6Wffa*hgbjSE6O|2M4d~z2pV7*pBNymq-gv(3_=@^pd20=)$^x$l5%32%Rv(f|mUpir3_9X6><6j;vSe9m`Vsh(GAO>(8O+NR2YEon#QHuKFd#{ z6eB}N6bV-w#--I$Ir-=pF@iA>udV`RM1I~Zhd}sBF_u>emy2Pd=G5O3Rj*@wzC}Nv znYY+o=X%v!`c$K^#VBvZO_iJc# zxWa=Oyo7(7-n|Qge;-I>>IP>V#=u|NuY)j{MMS)TQV%nxbC4sq0FTl4U>WovcnIJW zDaX?s0-<@?b(H+3x-Ip#NzR{v>^ZkAqP%r9HU1$dUu_ogUm`>$Z$AGbZM7M8*|&{@ zM26H6(*t@=&iQwNJ{Bu6Kg8oEq9zFfedQbH2HQe0QNM($b-1J$D>};Y6%*wK%h(1I z7y}i&zizQ?+ugRIjWc08z-9rI3wx%#&%?n8laA|#$8$L(i>&Lb8TFvB|5a7i|2s0= z0C50!(-l%e(yOYv$0{jlQJR8epuKvasmagH?FUmOm$vU25fRwi72s+CAsf*Hwe;Nc z7#kYgOkfvz07~osQHFUZSH1Wanx<2 zxx~;=8f!Dc>_Y6XU99bFUvea4xX6j7ix9X;CPnDa9`p!$CA8MhzGd}D4q0aK>u6b3 z`3V4zu;RtY+WPx*LTZ?d?YHmnZF9u+gI@;)Ev>lKN0e$Z?6UBKtx{-d>{#hN*gQf` zM%M85ExUDYax&UF3G|%Jm6f(YG28`F)AGEH0WUfB*s!9ycG??Wy3V;>JrlAfM#B5F z7_OMhG|rf^W3M*$n*+K%2KE|`2ov~76=^xsHL5LWaQKU;Uf{Xhx5|1`+eVrvQiStX zL(KfcXzo+Z!k~!{XPl8Q5wo6EoUE~m;!e39P2apLu;Zu0>WzP86&fht3IEGy+-dXPi{(zAf)85urL_kEO|~m(2-^X zdCT#+O3LKU33_ps$A&rpu^U6VkmGHIXz$#)gZ2P}PY-S=C`?S_1^6z2F>;HO^BbT~ zd14;3HMjm79?4f$+Y}|)vH0s?G%WJUz~LPcDT33s9*LKjPA?hIxjg!2HMQJJk1piB zV=Q&(26pe>Db^-l3U&&1MrCy!Vk|e!c9_0E`ibq;Sax`nHQP9~rE%!Uz@cQ_SS3(I zVjejgy(@1v-$t@v#vrUhOiszJyC42ed5bY{9*-oH1&&2+XzDRa3B;@lzV;}x1^hq{tfD${Me&Q%T~hqPw_?y#razXKmjESyv?BY;o;+} 
z)hv`gi^WrQa;m9m6FWPCjtBxb(5--h)Q#2c(`L+=IPm1nvJ4h~?b81K)U%$Q`7r(^ zVwie^#|L`xFyK5~!ng=}lBfk|P$Bf2{ce@erU;Z?(sW5|EQRrKRG+q)S>n})t{p%v z(Pf2-3c7ylw8y#@vDP~1T(RJM>MPmxgC-N^@%oyV&F}6ZQ3&j9LQ+K%Z04dj#n(}( zdGpho1N}HN`o}F;L0X8O0jA)9Ij0sV+Kd2Nhy9_9iJQO?rKF@hn+93P6%@OD_Fwl; zPxqIY5T-%r-$CgC;6GaS4Os?oNxMhg$N)zE2V)efD~lVa$B25*?aSokF{-$WIDY+U z;mHH5%zreggm!TL#Z9=BN?5l%R zru8pvr+tajM)Zw~X|MIN7cl33Dv*%eCaQTzM*AFDGO=5U>k`;4y8BaAp`{kQ)0Un+ z8&30Nz#+oK4Z&m%)#9YsjkL#35>F^7Ho!Ctm%;ltE%F6xHHC!{+xeQwKb{*C5}cT# z8Ym_0p?bF71`woXDyfA`IwpHwPTj%)BnlD1lJ-bcF!_T1+ z=UIw0!p{3T;=u6`6dy1eubJKf@m~qd2WCXFrpN%(E!M)$P(*tDcsWJ9>(1y6oy^JPwK6XmJD0XH@YsB9+|~fq3kc#t-kuGHIT{rbAi9S2qab%20FV$~086j0TOnvfUCp5e z2nu?qT-t_qH-f+6{j8gwmsh;2;P_&?#*B%M{I1PU+l+DyW+^GOYX{#g{z90mMu=fj zQ(@5LU?Aiu6t!i6WS15#OQ9sUZjjS{<|i+-D$38#DXgX=7mKJEZu-=)pv#YW35o~gZXl(qUa`ODS(+BEdq zJqo3QO|IDXpQnK?o0#n-xvvO15kxF(F-(Ocn=Us#{z-&gc)$roR902JU-D#MVS~w5 z--@lHBT?fWU}Qn-4pLxv9dC|2wXs>z-()~g?8itf6rC_8sJ*NMF4tl z@ZU8i=xM=$DK$IWPJo68w-5HoL4^d>!C-?U^aZs*A6`}E%a>g(xCfLMyncn(`x5l6w&0bNtLjDg;PGh6P1D z-SXM1NGWK+_M|*Z)*T;&FAQR6aVI}Kki5o>IY?s=S%Ne@@Q}Xi`rG%dovi&5JljP9 z*1&_1pkxbd9N7>^qxFLfWT8EFZ>(>Ge`(S%`kHk=zxcb#KS4Ryp#%+}9*PytIXf|Q z#xmECWmLHOg~H%blpY9D4Gl-J5q+soT%t?P?y!IUaJ^oV@!DWRw}B9?dIPnl}_4Sox%eEDglc> zJ39lmTIwgZ=e$ZtG+PS>OX8|2Xd(1Xu(*2D?7iele=lV{T~HV*tkSV))O%cy*JMqG zyMXATHx0J9m?aL}=ug2ffEhX!CFO$qAa7mQZ^PwNIgNl(q5PB-pmj$P)7c1Sm+kvg z?p-ru*v9l~#P_)of*zZ6UtR7)cYI5~HckEv5z@B;0ZBZc@5YTj`=*;591w=Wdr?-Y zY<_vlZ8wpvUaq$IiU<)B0X733lIo`H5*3B$41_Ea8J1=yK8**Qrx6$LD@SMu;dpW& zh!`bBIXI%2k#^_K?&#MMx(bHjBRCE~)pHwE6$q%2(NKoIS)kXz3kJ}c{8;c=l7fr zfr6!_rJNiFLWR(|qqt?W?N+@gO#9o~W^CR9P&#iIK(@NJ26VR2zvNyFV=v+h4r0XNt=Gau4&eMveeTHsG5TfDK!7x z8sUolp*(dX0v_tv=x9iy1`0cc4L@vj+Y7u;T6(~Lco1bG@3 z%vKeOA7Ubuv_ttz0f+E{o(cyC2f|T!6w&R-2w+B!0Ez(=QE*w<@4kB1x8e(+#Ko^| z5Ae!BoA>=%y2|5|puA3XA~Pa={LwP;$^J#$H9Pzk+i-}KG z`nKV}7qnu&f46>ZAyYLbXg3JD+d zhk_ltGRC%MYyk|E4WQLqP9jFz*~i%mWNr@?2$;=TI$no{WBQZgm42>o*d4>>NH}@Z z`a|1_HNqfc_fb!-hQfuXW=E)3-G4K_M2ZtadfR8w2buX*4I0E{S+`1p*ukAaMJ2jy zeJ)`Ub8g>N6u~??N5%K3D)g&H3fme`0#{eX^{gG7Np91KONmFI`UHTIbpS4T=@6>2 zvZAkuCf~Haq6O3Q=~LgbaXsJUXNliCI*QE&WiIxpVR5vt|es` zzg2!@Za#y#|CBwHK)BobpUgE6bf11xuA+&QVYx4WT5wH-ssLO%(_QQGKDPqlV7{wh?fzAo+i0n|==oyQto8rKjd zoXtko%&*KsH@S3l@{kB*WH|zxS-2g=SoC^a+qKp9s+as;Vj(xb%#y7WhKXpO{ggdB z*$1jRy1zl@Dy3=`9^d|!nQLipe;UKW`#r=cV+! zYzgVQv=l#VOZ0}hgY!*%)=?6!*pl09*ug+w0XbtBGfl#O_4NW#eJkD_?7D4@_%GVn zk(eXLDVd?1u_RHD;BwS&<}sA5UGH(IkuWt=y=*tGWybQ0DKXI3fBME?-nm8R)z7(> zI>Q4h{~_3a0Iw zR&3r*PSg@`YMC|uks)W5HR zi2&E+^YCyGto}X>e^@tNQC0@iXXc1Jji>MsJE%d$fmV2&?#^Wv7J@a(B9vk)1N{s> z^A;kxYinyB9v*H955m*=<(&o} z&$jB^AGfOY7;VB%OiEQ4G`sNpkkBG^a5gwFEc^_$3dpg1w8F>4B#IYl0+EYcOsIK3j?d9pE(4~&)1~Sm^nD^xnEA%Hvx{TDSf^UerK51 zP%P`-9Uwo!C80-K!=fv3**Cvrhu2L5SO&bBcL0V0_1xG4h?=E|(te=B_I5g(?*5maks$&oIdJrrUFyx1 zk?_CY0Y?aC5`6v` z0m_vaAR#PVA|U(F3)zmMtq1_J1kxyG5BN=vZ*y_;@Z^Dk?~Ef5(k<>%Gw2!{{{;su zyKnUtt!CLn8~t(AbwKL}hngOLbZ1=tUb@ib-`{gmQc_?Y_E*e~Hnscxe=Yzz^`UX$ z?yI0rX~YOAO}2ezJOhhV+IMgl+=Qw2(W(vDp*rP%f0IB>%oH&j=wD! 
z^oh#zAkh1u0TV(NLl+JfiZn!#a0u=kaDv~IN$J?f21lsSz-p(!cNKs8XL(r}e8Fol z-D~w&Ro6xqqaiSO=I}xTMm`$81y^%R|GvW50#e%UT+5n+I7}p<4l65TGj@WrIWayC zMc_aJU5op7wayhVT7U z@ABfO*fp0$pVw~&q|74a*ahp@gqMolRRTY>`2GEKgG63PQmK$EQ-b52-v;|{4%qm{S{Jsb?LcO zPO5NKsp*d*+mDEt%PyOf*Kvuv9^kLG@t1nh;Bx4<<%#pU+{Y+XB8tP8^r+<@hr7x; zYJ|l!iI&+_WjUFXSxzyC^bmP?#C(5)HuBy__LAVhH@re(_i_U|_)aiX+KqCXRhC@7 z^4>4-V*PMq(%Ywr0PlLXEF^Ru)^!D2GmELb=Yu!DwENHfn_!HPWZfHRK;EftHUbR8!0i(R@M7XNAfCR0!X$5Wo*4Ea( zso%bRgSNYU4d)U*NP@B@U>*0E0w<@G2Hr#U;t2tE97i}?y|p7p{Ux+VYgwD<>DY24 zIv$5KHVR4JS#@ti$wMzbU?=&e5$US|KVDV1-Z(xwLdipv?is<@E?Mqcp8E}|++L%{ zP9gyp{HC3v_0N#SFwVKZDkdaEQD>1ckN4jThFe|&ou6Qd3#h)qPXrtSrRHS-oH`tT z^7>XJ(RCdytx;$GZW>lHUsJ^}p>}G~2ub8+_cf6h1wmJVnqAu4vZfK827h9Yj5cU- zmb&xbyBV8bkLwro|Lr*66$}Uu0Ity`TZg_mxw$YOGYRO1%0S<51B$B=V6%9(=jG*j z|Diet_hxepdRT@E&kYaf3yb%QZz6N7tlG_);Iw za_-fPU%Q-ZiQe`g3tRJxe9Wsg!_zs9sFuKti+5j|t|cZ>7iTU#&KIzf{xBr+D_AAp zPlj#eZrNhz)>x(y2<9iqr}GGf@KH?E>1p#7%jODhho3C^{@Bsv-6&v_9ex_f-g{pt z5zLPv+qXcZ<>TW6^8h&2QcBw5`L&=R0|GVOU1v!EVB`4T(a_RzXNTlzY(fVDgER9t z27q^U7|IZ++29>DaEx|Y2L5*{=-y~)A(M{5wgGTwYkc|gzb9{Og>r!Khh`FQ zZr)Q= zo+!f_?SPL)1kui?Sl1*+=jUIK*@F9-HwfebOMp)TzwsB;KhMAdqOA>*NrfG)L;z3_ zPhlnlU^|_l)km;B%FD?Sw;croz(@M}r=Y8|*x3SS4)j5Nh6&uK(lG;LDZup|H`x+V?-yf0a7iE}(yV}r zfD#Su-Ua0zUIGh zG~nm{0Xw}1SpqDQl7E4s0N*1NHT|ZeXy?zXpMgFBlmksyJ~pR&yW@}K$mB6uLd8>1 z(Tlk~o?x1wCwvriY_DLFKbQl83Wf_2x6+fxs-7ujW1kZdUVlFI41 z{k`G+#5O}d{r=^n__d46t+&QDPoEBd$(zZ)v-w2~zVb}#^TL1Ig}?j)?d^ZS@t^}h zYJ8lGiYg-~=kxuwRUb&x`uh5ciXUM1`|?5VSD2NPlb3Sk_)?U-n{5KqGpIv23Ip7? z$IDaLrlzI<(LRP?1)gS~q2mCLgc7^R>y|cev+IuhGx`QpGOQnGV89}j4G?9)1-Src z_^!YU@65pyP!4CQ-wf|vK0a@if2V)2Hw%J1hq^936O;VROek$WL-_+&23-0e@PjWV zI)cJcK->R7z)MiR5v*7@eJd8K9a880OE^>I6%|H2R4sERhK7HD>cRjnlPc{Q&@O#<6s+?c%n2){J!HR222XGH;l$87I1+C40iGl~^bsra*u z1#aHGdzV*?>*hQ*#{5vaS}_@ddD_o&C!t%lPDTN_@>02Osn=zFknDX6$=!T}ddL=M zPPX*l(Uoxa{s98lR#)u_%a<}q=z=N+NiaI0VJp$Dl4}j zR)M@=^Nmwb5O@fC=k4d!0bGI(&GrYUBz5f(M%A0&#xw*ArTm*aI|HCYgUh&xHmL=VPczMtfF|bA z)gN+t4_S3u;v0_QC@CwRJi%SxDBI!Ul#L$UCE{cwSnAx`?nn-{#Vl>uoq4+n&jQ*~ z@S?5sHjH{o1C1&0TupR!k0Is5el90S0Pt(#;^K_J#~pqP7`vf+I>1-3|3vvizYYb0 z0C;af?|XHa7z{S&`;b+Q5PGjvdaQU9u-97J?AHw)5jEr{#`Kw*`@;;^h$3`-q zx`&*`e7KsJBZooj`y-1*h4$WZmqSoQh5<_eU61v$tg7fb_N|DW-XV6x@$_A+$O^Ur z#esruEhE1YNijF|)5}rA_38omaz_ef3bV3WU02>io8nB`zAeO_; z1Hg|GO$B$4-fhv zIF$&r%mIp!qN76Si)%5CeI)_^cLv$IxaSto&?psaOzSqI$UwV;hMK;ArqrmBeKoxU zT6RFt(0VvwQjxoMe&0{UCdJqDEk123-(E`FvOo3Ce4@#}@gDMLA)Fkw1Iki4DK%E5D1t^lh!jf`J zy&ByVt@Hsh(gs)v01Zu^4;%^8mSBuJvA9TL4wn%!yJ35;`PsH+Fcc$jAOHhRlHui( zKmE1LJ}BrDJf-`4@XMDxaDk-;lbickILmP3;PWDT0B`u?BL^ZcU?W&H8NzT)&&d*V z{==dFQfV5n)Mj^1aGyu6BG-i;JpO?HB0fINdS>k~YsiN05*IA;8ag}Y*A8tH2;7^7 z;&Oo92QYXYS0R>vQH45VH2UVir&t~5r_Kcw4}7hMfl~kjWvD-Oa&JbU_S)N~^Ie92 zOG$vf-z{o|*6H$oH=^~7*nGT96*?XKoHDp}@Gpk+mrIjS zNgzGfrpL$SaeAMK9Q&j(BLjk?P~+gs&kPzV*QI;8%z((k)$)f7%x}`(2%2MHa%K%} zfbbrw=S#Ruz^m+FQw28Hr=g*m|0(xpo0XA4gjGY1Qh#xM#tZN@I;wO| zoWPnP_I<+pUs|zUpAH!~apq)*iwcX;A}}Zf(z(d+T~A)CcK@uRrCGq=iW*Y2#o~-` z!N}S`-a@2BU@xPjneJvHh(cs3WpyXUl{XUqcnNxRMuloKA*^g{V3?Z;U4l%o=>%84l>gTiS0Zr8SiIQVdAza7_gd`C5OyE2 zYgM)eQBYIE$pP>jz8X7LPRxK~cfl7xZCLKX_-7X6dJ74&HpszHvB7s`=jgZ|12!3e z%EJ$}d7E&(bhkSsN^Ny2rX(vrzea5TyAdw5!T_JIxB}eo5CfwKxWNd>i~nhw8R_21*7gOH%8nPmOByqWZ?}%+?QnZlw*^d?-Me;gqq^t+Ar={cNh}_PouN56T1E56X&N- zD@v+JMT{bbM8IemW|(04?!mt$w|pV-*9S-rPz#`;P3MI_W>+U>aKg-PgPx4>7yq(v*! 
z;)VBz1NxarNyn;BPfXn)tHY240W4!ePqIj7J$R$*0sc^}KA2vZn(DVxQBxcL()}z* zZ5v$Ep!?tQ?#Ru_sZEWq!{A2ZV3nH7PPN6srt*Z?*x}C__v496wXEw(CUy{Eqs6(o zl*WHf&I2HN%s1NUDk(jmanw*(H*T~g^*Qa~AhqP-q-Rc4r81ZV?<4{O0yqi{fapVt zfBpJ(Yilb2q-UTSfNydRiUG)?Pb@9x=H}Wee!<27$KEiU5>7PISxCoAgrHfuPlfxr z+${6`7aKyXoczifr(3)dT`v2*eNiNjzM8(-D&1|cYKS1uZ|h%sUSe>9tC~X5|4O_g zpy9?W56N{Q*DqPggd3Egi>{(Bdj7KnU2~_&KM&6K-2L%!|2)t8PIGL?F7xti(LJ6< zJc{=%h|ncZl7C%?i zUtx@v2=_ml#IT9u+L28{L!8(W&e)O?PUG)15m5O83R6;20>hq1dV0|8VO7@D@N#is z2mqaXH^QT?DD8-GL$1vCiFwZo^UfYKb`TFh?RvGwkF~Ykz!elg8?;Y9$mT)W;xC6qIb`pxjEjNI8Lz`zrEE*-HBkb4Fd&DSb@>U)eC&a-N(|8XG$o$( znk^%0joaS0Foc(xJ$eLagc5uwj&%f+r`X|7`VgYFbf*rgNFj0mMkHm=^An<6yG#z@&`!o;2e&;-<{sbH#7{ZLDtD++S4|NmzC=oPPQ{jBWxO7g1=>2`vYwV z>SnOv;l6bXICk%GmVu`L;1STRsi6zzFR-(=C$k*=9bv;VuN$qDxbP%M`Zqjr85kx3 zwgkbd3+9ht5W{Qs4m=39B@?Tvaz^TVoPVZ^n0i59{qJ#C-RMA0vbW~k$vazFpZdKb zqZBqby8RuB5HWq%M7J&-Sn1QKd>DwQ_G-p)qr|1|PjJFNmtj?W9Nk_k;K>d)Q}1$f zi7_&e-}U;FmbR`!Fth1#zl4&AKo`m0`+`Hh)vn%pkcx4ywy21ioBIbCl0mNsMe5$q zpY(!ONFOU8k@>U*1111H*KnbXOeMEd3jHKB_Ya# zcztI{x|~`-22a(m3$s!-iXoFueDQoaRkxyAGedt$r13qWR+nu|Ons#A%$yvJ6~Cu^ z1l##?+A=N5S(TqAJu}x-)N(cAb{~!`f^&3yoRP|g5+}4U0YSdNmuW5(=6Sy!amq~K zo}SqDUlwXMH&h{Ho^Biwb>I~8`@uDFtiMCr5Mpuf79-R#)ItTwkM3>!kLfm|UEkrr0bN%T4Pt2|q<70cC(s+#SoZ%an;GTMTLZ_y5pz-tkzs@BhEdtWXiM zGec&Pk(Euhl~X?o4Z$}j9G13CX9wOVXp<~IjJS8+`OBopS_9w- z@Djat7ZTTei&3DMV6?x!88;05M4(RBL#qNvBG-K&-_lav;&EYcRc~D(eNakz+_2pB zo>f!SPrz*ESCz#T%`3;N2TZ@HrTuG4)^tr-ZS5Jb@im}*+Hb!2V`m04pv8zk*{%C& z%V`entR~eZ!I!pq9FdW(TPwpaVjBX;MZfC@5;v0GuEuSbzyUiuL<4~BeEco0|M2Dw zK#gFn#a7Udwf^S-USwOw*4G~yzm9hRIu7?7d6#warNJErUn~;uLTmyyL=PP@L;GIR zG1YIMBkR$w3;&I8E%(KnNhUCc@X*u88CJi1bQ|i&jq21eFaUC+*#9yact96i1``UX z>&qdR2*l7Z-gwH#7rXB!De2EB4@wQVfGex0jHf(kMx|vU80G(RRb^=F0kQwA~2c=`qFM??Se*3phE7 ztKKINN=UA0NacJLLc(2Z^uwl??0FKm`;Is@nU~osHIj5SyAP3jx<)mrGzwpH#wo&O z5S+nNv_66h5X4 z8=dZ9S&iFF*E?w|Z{<%Way6s7v*o zT$dFRnVy7B=`G`eBGL7Cf}n zP|?F}HtES*Pt%;|9ttE6GAhd--6SX0($%#p3ktc<6o?kiBV9cHSfC05NCFu{09N6* zJhU3voC24KwG~SYqq3k=&$KO#Zv?)|H$;;VrV|C@>j){SKmf;CpX`uGBDf`X!tg^E z78`6HtrdTf0*ky1gXshYd7W(R>YB9J!Pf5|N7a#A(4|cW%&&aU$V3J0n>;5 z-SbpEtQ{x6Uf12GbYM9wG7?i&+aIC62G9Ah32BA zqUkqO1=AgvL&D1+>OJw9Z#@N=%h|Zkr4N+4*Lf4{#TUmQjwWa}<=uI?8y)_CT7VnS zC@_4u&}*qia^S9S7{?yEVPAf$)fM}XwrIL+Uch)S1@B4A`}gd&Rbh9EBNAAz|7jT& z6^VKFez9b%@>%`nhpv+q7gRBf(8*$9{86VzNXMI>^)s^a^XHY04s(STmI}I$JN5;P znT3UgwP)X6kQBFD^P{gtGXy&d-SX`=#nk+RDZL8^@%Z`@CUBS@O5XNeS&{RmF<1{4 zKcJU8sEIM2InVe(yT#OO%BPAUbFt-9ou;JLEH(M-Ymq@y#e$<&%kOCDBsL@1rNj|D z_X>T9=&&0C2hUK7b7%S)*`100bSYr8g@*kD@Z@~^77nmYm}|gI^avttV3@J((sWMk z^6W88gQ1a?F6Pq+gD}8_ptdNn2&x#N75lw$v@!XiJK7@>vR>wNosk9X0144ry-F$-KDG{;o@jILT@*I z)Tr=VPZ^Zw<$h)8Bl+_>Z%{T!_hA;@#8bkkHZ<;7za-3>vWlSsS9R=m<+LZZe^OpA z;>&ap&`AogtBVt3eUQv>D{Ud6tKuW?XBMlwVc;P_%g6rXfZkKF84)~E$lGj$shM}5 z60?VdY5L9XDRE9F&aM-VMD+xwk*}n$VI`^Gx4wB&i{a!X!osQV&+W=(qU2T4syZzO z4SV|9|;3LN&`A$?3IAaD%Fn{r-VW8+(ZErs7I)_y?( zFAT;(Um_BpYz>i}D6gnUh@)EmuW-iJA{^`Wr=(=vO=lM0zP^p} z10`Rm=in59ZW#^Dyt4-yIYCGB{6|I_qxW}aXSr|Eq>#Y9^7n7Yx#mpy*j&U%vviJL z(R413KI~@8M;IR>BPqD)uC;#m@2?GiVF#6g#L};E!}*)xA3ngZh4@M9=$YKq92Sat zlSB8gtnV3hLaAxQ(E@=M44B>BZTIhIZ64M3^prh+{tyH>g9`)>DS{eW`uZ3d-|cC( z(GcOc0$Ld5kJ2hIeUrt@Dk}+XK0JPqRbRibF|h`hYp94)m1*Vbhsbls&pkvr`XKf- z2bx>^nad%1LA}~vw6vBdr-adfs4uSIn)t#>sY*JH*1U}Lqma(`*vPZ|IHgA&=YSs3y)`f ziceM7!fv*8d85hG;VFvc+kTr_9@{rGE6|o%;`l!}yUu0B&fePC%s|UyZH-fqit~BI zw{c20?Q>5Bl~`Br@%2NePU=-sDdKSd4E#Q$M@bJxG5+lB;XRzhBeV1pogK<%a<0Q@ zo@iG5bXop2!xuD*$~P;@K|CAet?yg&WO{~Z1mGH>q^A)daBsfd;71W zI*XNZH8;#$Vvam5W(VtZVsGV(VJby=*idhMw-1;K&Pu&ONX=35vUPvIY$T?T1c%s5 
z)Au&<(aedu=ksd%Wx`9jCYYM~?6#Asq&#UUbOi$=r(q)uYLO$W(96pbG`@>TPftG& zAa9r(2*FDZjR^2L8*`I`z(4)k`Ap-Q0X$LIj^XmxH!v_x#qL_42D%3Aw6EbI?dzsI{YOu_01HERX#c1KiqEM>o4Rb#bjF%65jM@rBJ%fiYXuGN61oX%VkjIy&n>r zN2x0`^^`4tqC1?YWS3f5Hh2c>2_5TT8Mi|CZ7a*#&&fiLYjNEgEKvjf3K6uH9|Oe; z1jNo3m4-VX2NGZ1YN} z$;F)Z9^rP3WaCA+3 zsH=P`z zup#6fCUp9hPil5|zp}Qlt{&(tWccCA2gEXx6Kx20d30EEzj4t`(y6kWnYVPXFgKm? z*~ZWj3d%?zkm)A*CGF><6f{cpXLP}H)5id1-KmTU2=t#qeF@uGs$9S@)QFET7eXT zGOO{z;-+`6ty}UbG?>F?CeX0oZw>q>=0QQP$K`%=?69U9NQ&^@Yiez-tehN!h7fYT zL8LhC1f1;|Cv9(U?`O~2VAlia;$yq-q`7vyAIh@&30shc{qE05588|F;;UKOal099hc6MKV_o0TUSw=K9R#l&{Kn$4isRQAf=x}Ak-e*8hm%8Nr#-!>{WHp9j4tx% zO2<`oiG$-c&?|ICP~Vu__VGHd7&o|fH)gPHNIu^L&>W;erWux*-}Hqw==dRv_5{Yc z^1D2RYbdv+(d&FoMBn`TjN`b@O*Gn(0f)%=goMr+66#5p0~l&5YAwA=`!hDV`^SS; zGw#Xx z``aIX*KJ(Le%J9w^Bq{;uQjb2nTeI_(3B0P^c{B?W!C~;Gfgm>ZFfzEjy$+dG^wUOj)Fp3|WI zEHbq-As7R|d-5pWKmSg-#t*+qYXnRbcX4+Sp^BkXI>HFn(zWk~4(|K{0xlf~6bKOO zL{hPiC4YftudK}Bf@VDns54~#MPTmC-=J-Tx%AD$t2r&Z`YT9x-AfP=DxWYXP!~Tn zr9~w4GWB{h{e|i0b+Wy~;o4&znyTJ1@0s_ZBcup88gG%gJbwR>=)ODPQcL%E|8!3h zNFEPKs2!v;Za59RB9Ts*xnpY5eL8E(Pa~#t7A$r#AFDF9i@S1o!Vh5xHdD?Bz=u!` zL=h1qK7@we%5kXlYgL7~UjPmLzn1^9!= z*5<*XF$y{4)I2ubc@YgaCpq64kV=T6O4EcLuArZl4xFX-G( zYvnaCxzLDV{HkyaayX(mb;+-t^P8)Vb8KBs#Tyhjn&4K@CrXHTJd}`6$ClyCes}?yI;H31LCrd9ki%1F5=mNEEZad8BB(D9 zAQT9K%UocP1@h&qf5~>&>eXw9Bd!17xqIUth zw0e;I!0^8>T?_>nAD_j-rgFz}PD9Ui4V9;)G`S5v{(ARsJPLlD%*KYueW!@gL&t+h z@zHNFPuKf+k3|eMp5T3G*MxH1-sMY}q|Hz4+s@v_91qv$nC_f9;=ON~n@ZYiXqqB*ey+nRF7SDhqfoRCm(& z9Kae1I%oa$P!YZAu6+z62aQHp~rpYwIKImCQ2uR#W={Ym!r&PeElrKszG_(Dr- zbab%yuzK~*v2MwBjqh)}=o zDmHfM3+N7~4XQxV2pcKlE~lRBII8B(UnGhpZ))CNu-+c)E-5Cd!r`d{(2_v#b568% z-ZC=lbDr8HnEDg?Az&^;T&FyjE-AKIzd&d*BIfnbn&(Tc+TTxnixR!+vJ>*rP*dA2 zOBZW09ytSy@O7Ck=+LovsFd{RD-6skD_;b)+8^ep^)4Q?E;%X^Cd`=fpwG2@iiru% z`I480WJPT!pggk~pqxY7E`u4%st<`Wp?{HB($9!_}F46;iz z4zn{B1qCpQ@;NLVrAux zi|ZyrXp=hBeKha5R5#%h71wo|`k2j2UFS7ZcXxNNkG0K%e+53Ut~DHWR|Bb@2ZGZE zWXn8J12-3rPrs3_9M#rRQ6v1%GAU=c)(mV9_lI*^gwqJdO7TnqjTu zS5S$-(4CVrlDHIjty`Ql&5#=m$t{es=puXD7(;?Lj+VHQN~DaTh>cX5WraWC-YtU<~{LuS3Lu zE9yZWx+YioB3t42C0dKg>jPLgz!?<*~2 zkn8RZmJhixK#b>;Ogeep(mR&u-|-Mio60gSbcXhdRm74foB>#tzVTL27+tFr$OH-y zeXR5usucNnFdCh|}O3sS|p}8sjd=<8h*J&bP%b zYZ^=D!AIne>57I6Qoj@-uiZ8x(MK+SM8wT}H}*q>sL@_bztz>%?XA7J?sC^gSlu%} zyD)5dps(k%?#q*v#i)EF)l<4P_=-kSGv&q_uV1}>yKCGqwDPuQPd2LYvXV62c0%u6 z`EOI6y%^j%4No<-Tdyg;l1TBclpiz}jaVp`O$jpv*rNV;wX=X!CroguyDet9pWSmy zcHn)`tCN>Ls9+UXxB817cF;+V+3Mqy(Y2VDR(jNa;?%F-D-yQT^=8Mw_&l}FBj+C5 zPJV~4d8@p${l^(eu`;n+gufixV_u>Ex;{#EX9y%mwA4%e-YGYZVZBL3(z&|4+zqKf zCntBJ68m#isyy9uvr5nYdtCorIxKK=iV; zFFLx?K=6fD9K3mme>1!;yIInx%bwmk(W%p|HJ8)hV6|V0qB-n(Mx>G=f~ML~s&d!+b5%AS?bd{v zQKZ(lnV)|_{O_G0Q$U)(l#DDRsZl5na-0glH@4zej#NqDV2@l{ooRTqa`c4u>2n!bj{JYj5EdiUc;2Oj$TDwc^XSTGLdc84~n z#GFp*%~SvGxS3dE;B`cLZrcne7nS~(-DK4AFgTLxVy=ORh<0jy24AkLw{M7& z=}#4Qz#~?AQ6V;kpkXU^&3s(kU26T!k`fFm&bkT91JJ9EE-pqQ zl0~QL+Sd_`L51XN!ZK{c8Sy#08HZ}{#tMU~%N&A2H;r&%%iwO*L zm5_Bq;iJzl@%es54q21)b4<*?BcvjqiY=&{{Zsv-Li$_|7qR+cLZ6=RzZJ9iyoCb!zMnjd9s6Ng4nLg2dVmY!1+Ag2a=QN3UvfWmkX+Ys zu=Bx$K@cdlutmC-fZ{b>$mxx=bUPRip%Wzne*yZ1z(9I6wD&BY!O9MKJ~wd?T}Txc z9E27*V}lx&r?^9ydui6YB18H7tUEgXnhTpf}!@_B>4tgVm*`tVWe=ifM z`qfb3D!AcZ`1^C3YL}lElF_R1%|jE?PH>g|ifJA&#NZKvS4f+c#X#$J5Dg-uG(lah zS8pft*y37DD+r!b#fX_Z_I%9LQC4HwnGKB5a+ytxPiT0zw5ffMvUXDVfZ61~iwxt} zU$IWJ85{h#&N?G0?@&-acH8@rJN8Mj!inu;^e29&j)T5m2OH50J<4&+Ul{pL`nxdI zKI=cRlJbtsE@KxK&{f=C@9~umBV*3{lEU_|;G3w`_R24c1No2u4fWvVNb*}WG%XoI z4YskS9}k8Dh@NtrFZ|vcjwRz96Rg{a86vhV>F6Np>+Vs0t~kE-UmQ<<7Nh7v7Y6ka z=fvU{^uv?sEal;~)JIOkd}(k->vP}^!x1{b+!M=L}bmp$^lhs2en%vmTAJFqGeojwd 
z7ewcWAAeUK-ffE(lJ{KbX6cB0$SU}@{%fNI6S)X0J(JVPuaH=+=GF4$DC);v{~nPd zTk@B6LfU)pTA1g$2yKJ}+?g(&qEonA5F}yN+*4<(qC}(T(U{xz2TS_#tB2Wmdtb#d zIyH%3d;V9i=r{UbBELz`#~k`};f4B9Hcd4ak-ZZJ2HAg~J|#RBc;qZd{XqU1Uujvc zN*WTKNbGFzxBQ~{KvMhpbF@2{r5lxIQfA6g9`@?@RcIf(|4KK!8c8hEBIEk?B2Bm; z8yzz#HlA95)qCwncm3?6q~JWl^Y&tI9tgnk(#Xy(>%B{wn8eFrYeW^m_MtVUri`gf zw9lI2vAO2J3u12K(uK&W`p}g0WDD+l7jBrmuh(+&HdF$of4^tE={;{~@Uqr%2+G8| zN#PHyKRP=isGhQM2?uN7M`R-;yPK~5jaPA`89HZ`H-1&S{p0tEp9ZtCg5p3%vx$nR z`Swa5WoqX>{(Y^?q`nT8boX}$D;F_$Iyb3P39H!IpPEeARPBj8i;bo6{EHSgM3Vm5 zhsSG=8+ zxf^}~Rs8mMm;TQ}4?o*I=m@9XpKBu`fA}x_+d-P1*I|U$%`?Mw4La3FgW=MbT%=f< zXgb1gPrAAn7R@Y>$Wj?ot`uR9;ge|e@jF(G#Lo{7$H{dhyqG|F2W_brYHHCVtB})%66nubR}LFS;X?fWU;?DIm?RVBiB=+C2LA&c&ff zC#6JQGjJDypXWN00=^D-sTb^@^qKR0dBlGGvHoV|UDIy?e+f1qv*w|fuy2sul@0%m>UecAoCbu*5-f-jQ zM{ivE3j%w~H-S4fbcTJh?c+C!rC)t-^6YMzjAqc`A{e7#BQRc_qRvQEtjCZV!V zoTQmKFyieV!6iRdf>8X2>tdD_3latVAGF+XW7uzECS11p zyI2qr*15`?2;loqheBljXNmh|y{p_weO~bl=k=4!y~~l_D9_)q`xR=07u3Ik{T@xt zD2KH;W_!JRTJ{Xf(;!ND__$}2Alw&0H|I=TE*6IPM|8K@aNpXR!yvIf3Ub(kI!R6lvGZipJr=k6?2EWu`H6zst5bLa2j_!IA_v(SZ$FJbqG;>CvK=H=q6)Kas2sR?YH9AsFZ|>3lb0u_XR(DWTL*nD;H4w4|Bh)CajV zGL#3guC#~cFPE!6a|lS~9LeG`(Fv&?pGIk*77?!NCuL|9q84!hID;X65qo529|`<6 zNyUnl(Et0DTuzhSP_gTNWotQ^M#letT7Y`S)W)W)4;kaDZeji%Jm2pZiWNJ|J!%|Y zKfwH9{V2@;_xSfcF)b~OZ5npLqXnPF-xHIkZM32dj=Soq`5LA{ALriIre z4{I*Ri0-`ECnN~mVMi}U5@?|@d50t*kUgwSzIa(DPe~+w}dWl|OQ87REgB<#VN45z zxRxrmgdZyg8K=h|-G)olWhc8`{i(N`S*pro`S)`7b!G4#iaJ% zZFy?F(k%Rf!~IHD%=7QnL|d>F55;XPELKzZ2pnbftCdkE;TpC}>{WKjX^K{ty9BDZ zV>Xi-&THRTlgzR-v_@IES&M2Ma{n&ne8BuWX){bLiSwi19KG<2XdN_epK)4oOz~D& zNt)ku#L1gLVoW5pG!v671ZrsKDknd(80#zlWjT)w^;OT$(S4P_ii4XyNtWO#P?RJV zWtS3DysB`?mTi|7v;FH3e(vWf<7-UL(OgnU54QUI=_O z!ra*zIK?r?OZZ`+>9GQ9RpN__=Q}=pU61e85W9DRwMD@Cw>=nyVQuN3y`KiB7Z~CG zRgs%a&p^QYZ{VB2ME6?x{N@ek*r@bxYcIF91%)}dQ-ZCv0lL&gpv{L^cTs3kH;V)U zr|vzh{J9dv$U^mpeB#5nX<1oOh-jT_qMA?!4gIQMM% z(a7f{B0v13#f(8H-MG)qJhEk`p!euse-A;`+C$^q0nw=%x2dANk>!pE>Zc}?IV}N7 zGyidu7I{B$ml9jBEosEiZqHLk3;clUJ>U&bjL~$p>7+Tx7V%G*tbW#M4z^3P_1(k%R z!Vj)j9)_3BZI=$C?m;3q(sN}Jy|8+=PYhM}n~4BFER>#4|3xw$#? zAtSj;DLhW^6yAMaiNY>MNB$d5K%xE#;U8?afD;=YL^05gps{jtg01+#RRmf|fbSd9 zdjv#EPMvaK)WC-|_^TLX!dD8!8(rfWSHYWhZJWIAY=nja8#@w~8r!^gV89T3i0j2o zjiBfQD#IIeZdnlRf!NQAn;b}EKELjeA~3h^#qx-;s3vf%OEI944~_*bEQ||%C@|$B3s!SKu3uN){9?H z>2R@(2uF(*cY2%SZsX3R|Yr*~z5~D|&==e&eY(MoSjg z1*bpBD<=OObl$-Q)t{PUzx3Zse>Zk$iT`CNj9zucengnuOG#tF9J_Yh-Fvq~Vh=-34MPr3E_?(Wx!F{IG-hFsAy`oXZYE8`fgh^5O>*Sp>P5ed zZt{c)6&NtaRZQyA)qTqnVP)d?F+B?Z8q2K0O3o{Hn*vi@>yr7(pM*T!A6u_R9v$66 z!S^tLnX~}G`m)MQ(sBe2d!g!!0$yg9bH4kS{&WW@b0T`o=|FVu0EDj|!=>{q!!%bp zYF`5Dhjgs4ZE{L%Y9ng!J3}xLwcxvh-@7s42YpcCCywMw%S>YbW;HzcXdBGNX1^I6 zoOK@eYTy9XzdkO7A%KILwHq4OFIIWM8YGUYnmUXyw2M=5jiXd(m9%RSvm*@ ztJT&%nm7bO_*W?{39P1fg>}0~Ly;qtK)Lr?2zZ9Z#>N0XlD+%p?=l$a zKpyOHd3MMg`x^FGXrcgo;9|7cTMMv8z(@l4zNH|D^Y}q_2Rw)>Akl-}S`R@;P&+kZ zUjl~2HX<>2_4UBN4ATk-wWV?uV7dOt1JM+46dM{+gD-BPn*gr95!0xlB||(uGarxc zgHWn88oB*Z%)aSFBDFB##*EL2Qor;`{-|{rMa=3at%Pr9dYs}FmLN1X8hu<+#0u+yJBz8$_N&~X15F*0!3;4_yu6! 
z*jhnI{|8FMn9*pW8Q-!>_VhNvN?q?-bL#nA-%6K-uEo+UW}=XoC_jofOrGdpiEz>; z_0P}w-{&Wu{rj1emKrT$d(d_AvSF}6>uXP`GRvFk95%dL7c|rEU$Ai&L?%tv4)n%+ zqP5@R&|jD|+o9jgJ^Fxg@jM!=1vmnU2+z%VE_5s@G)(Ru5+bRmkH_<&_vHProvl@g@II~XZSbo}x)v!CdeB>}Vc-*e-wHq>iJldB)ncC#%IUGw{ z1UT-7WzUVotU@jS9yNrs&FLhOysx%|2UKB6fc$XTet7T z*gZ&j+q=p3#Gf-zYD@WmtrADYZhIL(N zwK!PbS-Qvdma$yxjwB9_2u9zKO873Zdd!Cek}BNw4r+#M&FSLg?r%=hOsNt@27u@Cek zk;sNmBig8)3msCO)6lFP=hsfVsG|`gfnNrNSq4LVO&q-fLYxA>{xq&_hN*ap8Wt}d z@No+KU|DIw_B%N48`Q3Qg2E&v{p_0RDUFjE=j`lE9s$sl75m?Ap-3d?1Bj*42MSn2 z8Rf%yJhT)37*%J!y@ly_9KxSn(YThU|Fkpcn9V&S-5Kl99_4tX6*;hcuJEsy>~qso z$|-Hi^9OkrM40AAGdR}zTuaJyE=SL}2bgjv z4)2%lJM5PNfbIt2hp_jNl16lxSsCabNxA6%qRNhQht#ND{-CPXI2&B75{N%YZ_t&5 z%>4c#Lrw|iU%Qe!9o-jv$|>C(yQa*1TxoxIQ~FX8<(2o8Ql3V}hES{Q*mD2CE+a1q zqsINtBk3z%_iOFYRtrToI#bTC`}>KJMtWOKd6BL5xXp}iRpa3l3G$XCdGMky@rQRF zgcf-Hq2^6lMYq$z4aWw8R*<^_+PDNBUci_18Ct;L8Yu3w-~s@g45SjggEq_s)Ycah z1Kvjf5u=59JNWDXoe12CbU-uNDqq7QpJfih{{pFe*Gw9vPaIQ~d~S1PM)=?Z7j4-4 zpLf?4AQN~V_(m}=YEt&4T6bJ$x?TiP(e;&V#@&6MXzu4&%C5D~BDkjV$o5bD`Q~%s zwY6CymbtOL3r@q~g|}16&VfUu?{ljkuk{d7$b<{M*VbSOr*0@e%biM?5pj}??D{5y z#!ZPCsz6%uGHT;6uH2ESTGQerrng0vQ$*;yOQ6)dhE|Fn{vwDSMFF|o&qzJG7*E9b zX9RNsudn&TW{q8TUvnmf%vZhXU%U89!i#S!agH#5ifE585p4QWBj~;GV>dgPu<3lO z``=NKuh?Ji*3X~2)Fhh)9jJ0&2e``LgaPF@jB@NMX)heGnYZf7#6!^|WkB%fXY;H@x-+&0VWL>45lf7dc(F5y^t#u}e-1Yy zZ&Gpu`)AQHh=Q2JDZ;__Ri)W0B=WXod(anFWR@xRE1v!T=C%z-^37fassa#?JyhXA1xN3uvxWkqmHf{A=UVX_ zqn|`KiJY74e^TENHKd{cQ9Abyi}yTelx&a`D-r`?V7O(p`~?}H-zeqD;#CyuedDI{ zL{4R3KCNc}flRu~&0X)ljMo^NDwn>_MZd0Ew(45QH_qnu7;oQuBaDIa!y!p4nofJe za!A=UP>_%Jq=F+ruu^P}TFlw=t^A)$7p|MwA~p2sidcxB;ty872&f~FSr!Hw=t)kC zIS&Je^>*f=bld}=-*0%vfaC~%^y|cP^EL9*%d5JrS}X8D0+|+YIwu94G|9l_Zn|1; z0*3tg6bQTk>6i{~J7B{sujZv+@8D05r}sgt;0@b3gwDMJv=w+Nx$aS@Ui$NCY2EEh zKZkVtYhTAv^OYd}KX%P-4z0oPh&bY?@qTsTcImh`5^8dj6$mQL9K0&QS`%g2-=`B? zX|L*Ry-VKhTp$BP>mtkHTFmO{04tw~4?B$G{NUX6|L zMi!)jO!7ODP8`!ExP<2j!1F7U&R&kHXS7nGeSL$DJuJlL3rTDK?d|*+B<1o?IcAR7 zOhKx$>W@_6#65zGDSRE)9&^=AEtrMh`wo`hnGlI?ZkqboO(31Pdxwo?myI?}RpQDj z`iY{1es*hpeJR^^! 
zg<$XvEUOcRp!fl90xc~s(Iy++gwgcO3;j}x4G&^3$hJ<()&2AUvu5#0h!pzk^TI4v zBrGt5MZ?q zuDi}i>Vo28;NCdrre6Q)XP#`3?!&wSb$+pLcs@CP0pP#mnLO4W7=9SIEyXj)MqVt6 zT`gWSZDA|)JS;E-goYS!j-a2!hwiG11KUB}GeNh|+*v*SLRF?s%09X6)rV--VQPi5 zY*}p?mRXZ!tamPLpH2A%$j;hWx_ix3=6)(F%KuV>Zrk6M$t)OzjJr$5Xc8UyGi=2c z!)Y%xELVj_TQqA9{!x3S#w|cps@3~S{-3-26`p0f=}%f`=T(<^UaqF~ZJl_zGrITw z2OK<)Z>x$ni^vM8$elTC;?70MnJ)=Qr=P};7q~Xy-PjAA9`1d<7hN+gEyY(sa5}&F z;g{;|?!&$%1w`>|yqltri&uAW%&jWa9vA4W*2f^P`MqT37sUi`2y93bu%mJ3O4ZcV zVlSwL8bKsOzgot2O7s&(Sy^0<$r%66Ev8{-4bTu5Hb(& zyJ<*p1kQ}j6xhy_A!`s07>K9YQ)hcZoqqol4qT3R?ZtUHC6+4HsB#i;2!NqO9}18n-o|);;r-H8cSgPK6kX9|U52D`w%MZNI&Z+-TltO68 zQgd7*@)$zTXF)Cu7nOK9#@f0%$T^z*{re@LC1sOtOFHU{z56Q%Yy%aQ0l0obD_E-b z1_B^B@Faqu^(9bm0WJal@Af(fRlJtJ-eQoUdg^`v4=eor%GMUPJwQ9a9{oBAe#oT~ zXP@P5pGCiDl0&7q{XQn_FhZ0)oZ^12%$nbWV;uLSc~>J^d-nHLuvp{OUV;+WFjb7{5T?4xuDFQZ2siOs3HJ6cQ>B$0RPgVj#72{Vp z+cgf>Bxn!W6HFLmR4}6}FPF5dQUs@3Lwf%rl^+CM5xap2xCWODtdnw!)&bz>vr*Csx5I0&mi1)$Dv{60Ge{tU4Du*iu}0gO z+G5Wo_E_fU_cEp)3?n z{HHuuSpDW=G+gELz<`}R2bC4q-20#+SyRE7$6 z4ta)He)ksC+#k;qnOP^_&r3L71Gu8PXat{vH6^-d((a}|M41oE)u6DrV$((qfqth{ju zDL9}-eEn(+Ftp3jqjAzM~S8NGnHQ9AX$Yn z@i46E%#EhDVo~RC|H5`L(n_K>>^%+7V>MKEx(a&gguGhYiwO^fG-wUE!e$Lx8Pwt-uFrH%eL01dA$XHi~n z+T#j5(J$$cc+Hh5@W@qxpEX9l^KC~IrGSk)6J0uej7oT~ej?Rh_4=_FaYb10qndSF zhRv@M=+u{R%R~T^!vG+Rz}frbJY|D`E#R8s2NVZzjsgYk8WjgZ=4)LL09G%d4ebp| z%Yi3sv-WK_!F~@~XP{=fpRMMhwzjX7O&@5GfFbpPz_fiEq+Nq3(B}-C>0tGc1=G5- z0DSqT-3U8Ks{E#3S6Ng<*bGxB?B2mo3g7!QiZ$$s7fg#t3BUhp<;QBqiDCZvs50Gk zyDzi$_zlrGbR4t3e8p-MVYPiYceqM}75aK2+Lge3xjnQePsi01|9$H(q&lqY;xEMHqpEn$~9{iLq1cJV^>qN7h4HUSJJ&fm6olnUHBY^B^M!0 zsNmOpxdw-g-J2H9>>cLlt-dahJ~1JnfY^|CP7?BtQ=$O zygh8u;PZvE9;nJdLKXw`D#*t{#&;dKYv;AD>*VAFCIVz}@iqAPIziKzrzk(495!+2 zUZ!~+T?5_0TsZXTc~mEc5iAmH|L_)GBOhRcgS5M0B>^d(eLy$^2G<6_Lf0&^y1F`A zh$QFooW~D16wz{0)T3WD=N-qpLB&e&RmXskSfIpZd$j@vgUbCsH7t^!%ghsE1xJ{v z3g+7g&g_zN+==^pruU~l#WIJ9OGWp){gGF}3r;=m3OXDtx(Bgb48BNw*?nZN8+9?^yn_2V zO+1ojl|Pe3X=H?E>$i{ntY6#t7ElD4Myoy)6y@i9ugmKVjE!C2+EUnXLzLBua3`>mBpR*=r^1MG>9rfJCNxjFXZ z6AV56RjG_`&CF8R1g{QxS#F0*{r9x(eS#kTY}2&&`>OxCa!uuQ#ec0U_dET~VWBLc zLd;!5IpJW})zHXZKLgZ%HOQc$+#@9;V~OpSV^r7Eql)U3mY%)lST>!{Y!gD1H@uqH z*j=bdp!V^GmAt++TvI#XchgmNvjUB~5zq|+sNDdd=|RH``fAX?bU^X=ltzW|HZCSQ zj1#Zcw42Tf?Ys)g${c|24=-wBlVYXW6&S*5A$(Z$f@IidT2l`jtNHVys`NY^p{Cl{ z2aIigM_Mtrf6GlL`XfO{`wwk$pJ7~O`SzxlyG0rMQqVExTcIVGQ^-7_VXoZTJ>T-S zZ?~!9Y&T zz(ZemybGl*yToSkVUv(_vyt_5ZERxf_7A zBy5&ao8<%juCv|ANNzyz+HHO<{VUZ!IGz7ehcIh(GuL*`WD??>D+LbA(}Sq-gFFRZ zJZ~^LEex|u5jK~Su%$~~43$t5M=*WyPE>88z~@7x>DW>%e0x^A00$Z!6-I(YOqzI1 z<_;{DKfS0qId=wrtkgREy{p+VYo6bO=%4uV33+V>z4yHMJi(tb?E(P8WX z5@zrgMaRF3n@B`;itfgXaQ>u9khSmSHe>2%vROI3{ToKAkIpn$xSVBvt|W;ttRAzYi0i<6!-w zj!+bh8+ebn!Wnc7{0?^sGrOs&I>6wy$NzO+ev`DHj_XP3pNeXe=X%37|SN8@oS*QWD5nb*s?c=1GT}Pc1H(ROdv(Dc$SBJ|T zeyoqjpe06c2kzz&ho;@a=OM{aoIuaMVhLy#`^c`FIk+<0K^I-r-ty!C#4%P$-q%6|m#U~}f&p^DKw#@!5)kW>;h zO0E)QoRAWXulojU)kJufX?fh0Wihe5=Ll*GVef4yG?#B%O;%8cr>=GSE!2C( z8YeBJ!mGrRelaf>o{RRiRFQJG$5I3`DSC-X}~dUzk}y02t-mX5jCI~}>%6&9U)Jw#Y>k)nGhCC60SyUw5Q<``v~&(bN8 zwtH3e?40l;Z*FaK5DjeuYrh}r>h;IJdwXwz&t)Kx#)YCDUOH1_AH zD*bNyHR#fTe+rP*0+r)2utWmT8jzkeP*nxOp?bQy&&pv3UzfFiT9g^I-U9cdXAf|& zhpgVlb0LxbY4MxbHlE&_riK|4gErOhgWs9XRX4Rx(lZBnU5}PSUWsFAqg{ z2MsLnMEh-YM}yr@7mC?|88fEMp@m}9uY8S$WE8PEsvtmGPl6CgZ&S*@HlYum+dWT*CrjU@;Svzn@L4aBZ(}?c1O8bSXkp|dgUEDMw z`&(cspbAcSs~a-XIM-)ROcZmZ*WsXLdYe>gtT^sP-Aa2C}eo?Ez0 znkyA_Gnp)WCOdf7=U`=$^M+z0dHbom>qhsG5GAj97WYe}^Mz^le61=t+a77zk160` zNhAH2FHu3B_kNNluaSP^&yFot)Mxb~BZ^qzc~BX}D?um%3ERzPCXk@{s5NHNzG3iA zmyveK+*-?c@v)y-SEt;jB)alG4M|X9=e6HGI6e#I0{XsXj59G^s{E~Q+fb&+Z_hQ8 
z$&&>A&}&$scd6T|P-^T5;3stnwqk)S zA{vk*!U#&;@~*!>A_zaK~eGlGn_1ThgH z2c3MCcIx6aCO2}zrHnd|5#kFw;$Yd8Br|7vhoPZy7qU8EmqGHdfT%z79IEv67AGdE zflw|e&q45A8;}-%)aM z;d-|1;@1e*9eKkVqv`14jd$o**d4OHl9|HBw_Dli-PLx2ct+_ul=7)xs;94|lbv~) z1bA(~OHek-=EFO!JXGVhn9+k*Q7LgMqoSonzh@kIqsEN?VLgHlB*e2&R^J?eWI0mdj0nQawdd~ceykjb4M2e1s~!k06BB>{|IEDu zdU2G365;irY|#f;6R?7zdv20|<+cL|Rkm`}*bjFdlOBIC%CiUg5a|&q;LtxEVlIrw zqx(M&a^Km+9+U~rE@HXw=ja*cyeOO84tML?A9UV-Jv?fx!q$DThb*-%PuT3VT>9aC z`BzQu=)!@U;Ewl;vX}7U5X`?#k)`l>M^1gtTneo}l4XO*TiaVf-_daf#;b{Q85w0FhUCvBn&L4BH&a~*zDGvEzUe?h|5M*q=ixUT zBPgYmuF5T}0n69O_eO0UsF1Lc4lq?7nt5`d|G4h1wifmCr zg*x~)vbl86u3Riee!=2|hWogu{C@26@^}RwA>op=7FeE>_d{+*FPky2!Td%YW>%=Z ziv-h|$CZ&pm-*tx|JOg6L^WVA!9^_`6 z6yN{~f%jnFj+dYg2@)aC<9oKd)6+Y+y5n(J)G{=zC@EPue9g#syU_ly0BozX0ACjX zXd5S>Slt25UsJiKyF1Woe|Q!{03yR*kGHqBfWJ%o*$ChRoQ(kd33>n=K)_gSTZI4EZq%D1%G^*4F`Cr3;kJ>F|Ckw|A}N2k-+WWxyqU3tkF%Z=js2 z19x#qjG&-cl0CK$=wRdVEP1Js6J(oE%V1y+t*KM&9tAudU+ zZ}=PMLod;e(yNV6om{|}JKn*N+FV(P{L;F}yJzc~E>7>m`48A3$h+GzH7so#g9!|V zK$`eiF>uKZAGZv3lAp_P;D`AJuXO(k*kA&e5HQ$i1&eZEX!Wda0n9IZbnO-pR(a6_ zrKu?wbKt@9K0JsTNc$Ie58(6z$B9%-6XPrZz-R)58W_+iOfu8cdx7^=?Y;{LL51?E z&kk-!M-C7$NVm1N2G&@A`UEE1B8>B~lcmeOaV1Y~eE)|mR$5We^trtlHImnz(xk!vbgs+2s9l^$9s=djhdt~X3wZ@GQ9bRJ%-N4Erbdc=)Mv%{8 zlC8<7$p@Q65O;NYb}bZjdq}6+Z2TZqO$zO*QA1f|oc>T>4%y{*ytGgrlEODdXokM1 zLg8MbFE3~#sLA5M&_eKLGXZ=zAUJaf*o&`-i1NniDa^n^4Vb!uX}IuBzY;(do|g;( zA_a>XsrlOobP~uX!(+gg1__QbPMYhL0&POq4q>1%!Rr*JIBC0;HWKSBvq`QM(0~Cu zri=4)rx6gl*zUKtfvp>G6$RtNP{=YEdH}DP!V(*1eBjpXdUWCya_9-b2+(`CmHs@+ z5_?Y>r$=ShlFd*+8fyuNS>W~Nk{$zBoL??lv}u}$Da4PU^fHYGvgBAD3Oq|S03-q2 zKAsidpKWdapzKL2jBx(w>Oc@ScxZu%Y(?LJr#yE4`%B4#h3klZ8oC(d9fSEQafnT1 z(O;sN^*?tzBRrlh_Y+MC$K2_>OJC)6mW@Qo>H3_tIE+O!bLc~y`XW7Pd{!l*Tz#oz zQC_m7bC#K`uYbdjrdJ7p{b{U??#h+&?fmD|0EHYS7y*~SqAl-SCc|iP;O0Y;rG?&h z8vO&;6vT+-=jnE>?bG>pVSV7>2L|}t+Xyg0z_0-%f%1liYha-uS@K5TMqLr4E46X3 zND^1yyK`opM(c|UMJiqdKeMw)9d-7=^mlRbUNhiA6TGh~EwqkG!@~-_Q0oTA$e--fGf`cT^06=X9puNt?G>>;}3XT%#*?>Vmv%L zis{Ma<7-R}L}K@K9&@u{h%Zn17Ob5t#_*@+j-4u-8s~Z+@_8r0(Q&EqQOS`^z^$c` z3Z|ZCRCp+5Ba+ZUR}(FA-D4l#+dBj7J&#yuon)_Yde1j3ln8qcDx8%RrZoOhKX|<{ zRF`0&0L!t~+d6V2>pu>jURsVO$!cj5tE{v2Jcl%jP=o{u2-6$SK0pAwQPk`GYS0p}Ui17Ogjz%b>WfhooS#N4|G7QT}xOopJ=T-!6_<1`k+k6YDz z|BfiaOAK!BIIxocY_VMp?1zSkQ?s&G#>c^F?pfeg7%VbrUtwLQ_dhr~dK_aj>Lq}T zcuyz111DJA40ZJY?*~YP%fMnDun2%ox(_f&cn6$80lb89y$r4e86cSN&p3wnnSx9L z=C>e18Qu(N0zQ|!eioat7&+#Ue@ZTWYEWQM1hQo^24zt>g?{Fwb2j}iC^32ZueMgH zt0Q5b(#P}W?;~$AZ6n;mqNJ2K>>c>ibp;sQxVGLJ?5^!xvbmqO%yE7n$>6KC z+0O4$W-OYiLz}E}(|5H->1ar71F)?`#oLZ_aC$H?CAVAS(V)-qmr0x)Qww-Zfst5? 
z!EuHE#SblJTDzaa!^3NtnTzSfYZ>(C`J+4g!ose^beKr9b%!pEdapIfCV#9P*W2r= zOJj3K0%cnuc_hD`4|u`F&TtE%7F#~9!%bTl$AYzil)JR|2@d9^0dz;yFz`S_5s1a{RT*XU>Loh z8gkwklyun!RJ+fFXIEWu5?=~x7&g#CRL%6VIJb&b3#7b!ST&7d4wvpXcYoF*#@a77 z9FBG7+3Tw`z=#Hju6?bRo`}AZFe3VQgZf}_q#@7QKP>I+Txl}UU_SLMW%60A??x@@ zGcS^HV4jWolpcf^36`=->-S699slhw<6!D-nHCK>B7%9A69fAKymY^0W|VRvXoJDIjOrNIVyTL8mm?{rcrlS75 zPgQM7{lft+%eo{8ch@SQC~s=+c?1Og39h6_a1aT~I8gjYNC1NjPViPr8m}kAPNQQ1r{gPD2IuF>#c0XR*#6#G2JVHR=P96DZ18`vvr$dQEa> z?$e6~u;hWTZgf?iD|f7|)gBAUX_;1CWX#o#sF8^1QCd!}g+)HfXP6G5pS2o(5+y1y zGd1ppg?g4Z*e|D_3Se+xy<%|XFy7s)z$GVEpQ#yo4T{@`?Ba0Ayu`oJu9)kSwMSYm zZsjSemC-Umpp$HPNbGSyI#P<8-VR<&;gCp;fRL7cP%z1AdG=qawlP-HF|h;^WS1Y;vXK|x@1 zUtO97oDP78)8D`(e@?>uPvL(*D$k1v0L%nTZbQ1)($k6ioP#@81I09To`)U8`ZJ)y zRn*XUb^``u+2`hlLi$TqmZ6GD71*)_J}Zoo@eF4GoO-yVz&<)b1S;Vr%93Kt=)YKs zUMXhDwX_|TWi(@C*xEEZ<NW-~ir@c^lEg-KxHH4O}A{U2_pr{6=w9)LenB-R`o zT5KOxu$Op_GTGVy9~qB>9$>G`?0@bt0cF>-WE-$O!j%MuF~BAJ)Tb33G%w&|1zrS; z&IbU?f@$>h{r$Z%{nj1u?{NV!^yuiQ9xz^h7(Up%OUH-f_8$RUH(#0Wi-ghp?X`l_~j-1$Ap zW?Og0LSyKQh-}CSh>uRLBe18qKVuU_XmODua4s5fZ)$uW))jC+ZTR+b)R66i>~BO# zUynMC6gEifq+WpmDtD>gttiN#FOjHV4)rgjN5rYc|4ZzrNN_OV-mf|F5~G9eC{&C> zLjzATCy}G~?c28@&!#Mi6Rdu_a#8p!*p4;KgU;Y2h2pqpt&2zImTFb41N}c7!f($Y z_DPgs%lU*{*Q=kvWuRi_L-z|FP%S*2rnJM2fl_jM+GCKV9u#Nbyx;f=O>*(SeG8y= z$<>px`eh$&m}?M9R_>OsKobGN3GYruMg|ayZz>0)zqDQcplR0o0#0UAJ`G}14U zZr?voC5J&01%8>c6BCq_L7|p?;aF505fF6^1zl~#perByc?kRz7s_nMW4Gl@`^9CZ zAL5FjdpT!%_=0Rl%IMmXhw}oOBhyVR=#N&24);|4o zIox~>0apvx1&Xxf@tUs9Z%ce>=BSDhZrZ0HcsUrM32;(7qX%M(6{jalhlSvFVE3B? zTN3aChPaDh8ESkyXTJ%|dzv-EKu863{KN3dy@IuM@ob*84%pC4B(G-v5j&;Ms}A^X z?|tER?g0e`07kK2I)Na&oZ+o1n9~CimLxxrss}%m1wVhIXILM){L~-}dzRO-;_`XC z_pXv6_|da3P3;13fT7L^iLU*NOQkPVU|?bn{x4~RF|xsGE`@#<_mLLz2XQ+RM08j; zw(R1h_7Au6hmYdNe-4(LkBvNCbrt7kc~2VMGjN8D{*_3mh%~yc{qb)|Y{_nS8z_qG z-`E-b=DpF?m_WN(L|tG(vQTX=HG^A%nQIGCtxfb9)QR-J%Rd6ZCMWT zo;D85cz4i!fVO10$=w!s{HduWSf_z(A?Ay+1~xkz04Es?Zdn1=;niw@Wy7=w+1c6Q z?5+M>1oHKByaTrLKA@ch_AFIDv$KsrkCUTJ|LXJY!z1Y4pDAsx>FEK3#@gQA9YjFl zIDQ=v`aKu2lD=Ke-l-@w}&%aCv$IpTE0(=J>N$`5+PQGhp4H;+ayCj8KwHaGp3GIh^F5n$8G5=1@)1qDE6!^HD>?i9a+Ck)zG@P0rv z{NU})H{o3d7qj6jGM)!G=+9x{#7j?ODl?N53%S%fja~@+v%w3|d!15Jl1i{Gk8M{R zLK8oV0y|31rxZG`jX(0nfOlr`;g6$%&y(IAn9A_ugcWWz=#%2uQ1^CnY0!jdrF9W< zQpR~K6)2MEyIkpGB&FafWWDEeH>pay^mwzuzEmcz{(_eBI%S@C(ZdZdg^TaIS+?}1 znQ%=+6%Aq5*B zej*UM>18*aE|a1Aoh0%{qNA*9kDkfnxcKJdNDdB!ne(Xs z9uhAkjan(;La3EIB^7}3&F0;?#o)$bdio=a89F9Sz!M`-?1Yd*WF$n@5!KouASpUh z9-Zw%=nLj9*X%fKJi=ivT&9i#A3&td!WFP(B8NR}IOp8wIoVG#ob#|ldEHAm(rCI+ zQW*3Rb9**9w=j~34+0z0K6Cc{^TM09hmroPSCr$_$jRL}PW-Zq7AF$e>4A(;jVbPO zI+Hjv>d{b!gkI)igBr8`7kFw?KJ@B==aH+275!=!%j&^Q!E)Pw{eR93p)(d#fGR64 z9QIES$pCMV7;&FT-P%dvw*LJD0gUto)9$DL!ff__lexB^?>8FIxiYe?rk8uF5_Ig| z>B1~jt1D0i7Me{6al+)5ymrzm>__I(_ndI|TM=Ap@_5*;-Cq`1nl%yqUh=XDBa+m? 
zQA*Ll+CQhf`~zSz{f_rx=yuFn3x(O$(y975Y2eyc1JzdJ z4s`AfCkW?#Iqn+GE>^rZLDB(}wopp)!3KkrugZL(+*JXv1+f0MePQj$Awt(ACjHG~ zHq_`;g&e{5m2luBHck1huU2PwmLa$QK{!LgDR*FgS(|DhC|8(}611~v$YB2~z&f~w zfj@B@$mN>NRGL}*VBI=SiZl*`2mpRa8xX>5W-Np^4(Bqfi%iBG6MHFXVNj7N{q$fc znw8BP4C3+you8gNBlDgs!bA!>K}N>TDmgu4sazfM%6l+p);^S2qNPNyXp>{L8k`*5 zFS-T>khiD=D92t}?=o>$f`nSlt7u3BOCb^G9!MiD{ycr)u(GlW?=gAKh`%Eb&Tc^5 zmJo3aK=_$$P%IIeC4tR9&XZ%;!*?k)$)%xTTBSvfiAh*jz0V@^43qVC&e9af{j5v* z%U`W`n`-W2;@pdb=gCxSC*|1);e~tSbZfVsaoAixbs(hYd1@l3nIf9AiJyeei+-#~ z`+^fWG-UBD^yg=Zg*mkzno!r5!NJjsP9;Ln!x zp&(>G39sX!H5<`&4GgQ&3X$hn^Q)gJUv#qXCrl%2Du)W6v=TPf2K)!{fig^rxSJZ0pQkgoCxyeZ^co`^V43}8ASVbt(Nx{Mi?h;BD!C)p7OALuZ9l*U~UtHfu zY@QMTJ>#jPF76;U^e7WW&1P0G-w>gl8Kl{jyDbg8sJ=qk{ie@?1lb}FJ7hzb%!Ak= zO6}{uahPxVe)(^W>U%Sl#h=Q>BT#9h)5%&=T1%K_2L=ajJJC6OTk{nzD@y0`wAgw{ zrlYOCI{Kn0bp*Lb?vA?I1rq<75}-cRnlQZt8}hhMc(5dK>>cqGQ~+e7Y_#K5N>V(z zdmtzhsD3^?n1NSyx0f32zh`|pX$`+$u`+z@5{*DX(Gq#+|2NO`#^HMM8-P$EQcFs< zMUc1QxJ&ksyQ9CUSlKv??eNMhMDX_)Ey!66XSgibPkAoZnYk6q!Q!%=Uv{^t$I+|R zL_h-V`AS}Yett>=V=Lkc1VI(Dmeg5qpXZX)DGFqk);GW7q#p&w9-C5N72j^*V(q0k z|8o7y^Tsx;!S7y>*=+nIdb3>{n}Py8Z zKa3TJLsM&0C1!o?#&CUoD?l0NVWl=`_tzMU7LKn}_aeSl+7qq5o|2($+NfeX(4t?> z62tC|EJmn z!wP934a{l&#h2u8n0qe;GXBY)?Y*hAHQcyNTdGBjX}x+(jVjAnV*gp>M8K)_$3!Io zicD`awsG0yH>_|h*8MLxy~>n!B<+p)+I?yi{?Gu1sEp>x)}joN*2MD2uKXm~^cvG6 z9suL_)6@y`ORGXfXglN4@c#eb{{{P-Nte`@;(exdM^l-;H79S}eC~g3AIq9O$$V@# zbfsdZ>-$*R2Y>AH@p;QC#nX|LGeoWwI3N9rAJ5EG+q*V;?A`wbYcDV=WrzzZj|9};wabhBAe}QPQMv0_{vHCS@MCp zSO!qpOCy+e7>7VtQ~!L>mU4-aN2=^1M54DGpphK%^7$9n`7-|Txn2O{vwl2U5{d<- z$CKqAm%C;h*moSeF3pm`OoP?TgD&2SiTQudwM3WGq&7-# zmGwG}7>btdW>*=JF80!NKN22C_BM%XRcW%Ie4S#_T>NkxX^q#duQRYI2T`C(RYWez zz1Y<~E~X~gbiX_y^N$9x^T>EN{Zcy*&KP34N}QdIsWFBOc_)H{JXzq)f*wdH645s` z>D*bym>#6e#v5oqn=E%Rrf$yrn?gGN{~mBT8!Yld(BA;eAmw(jb)$kC+xOk(HH26I z`}1uJv_u)vs3Y@XXW7g4EJ)n_qEG45#=G>t&5Iwa|Io|AGxAMan;u&a3W0T% zR~Q_E+k{gApKSj+>BVN52uZ%KWRwObjVt@F- z@!oneCQb1BXsw6tuOB|al6e|au=E*KMetJYScB59B8rZ3q-#FE(&hqElRZ4IOVbS4 zJ>(t6UZ6M;sl!9WQpME#0Pi*i7bSyo(-eE$Vp6eHNbm#x>AGnnWF$S+x{|q1c z$_SOSR>Gn8_AX;;;FOie1Qk_zP|BWUPkwK%QO{LY_O#SGmB=D#K;#sXJpNf z@@P);98wTAL?@B@qxq+E&ZlPYvzQ{$;2fCVaZ+yD=+tuu@w^3`?8shX#3314N~$PS z$~8rWjm8{^X@r*N{%lo&p|91{sjlN|mj0BvN@~Jq0N}q@Axi6qdW@&MAWZrds&1zm zW)!!Z$QJmKLs=oHqyzf4smk8=@CR#qo$F{&c}PYGN{P2Vh2|JWQCL4EG8GTO-$)Ol z2sT`UP0!&puD#xG_^)+6Hd-3U_PVd(hFxjYEElXJSs3G`8wcSW$s5b)V<<%7FQvJ@qDpR_V zEc?A^T9i+pbMV*e@H~sGXGxQoP+FSA(@9x<#NacXx4M~VV_S_M5B6xzT!V`tyf99m zI34y&;-{G6@VL2m&CsIRHAax0K~eEU;+GYyJ<*ut!kcM)KW+NT^IkS7x%DaxAJzfA zcD#S|G7EKIb5u$&Y5roQy-^F#{hMa}PE;^~E_unhPv3Q0v6XR$sxdh(mjA!gRLQnU9zksk?{Py!M?!2s ziQ=a<4I7Tn82=lMyoDj;G}_AKNTywaBy7l(L3mv1Suzdafchryq2r2s{H(X8`-WyQuK@ zKD}n&%7aOSpmV{)MPtA7I+han4k_A}Z8Wg-_-^+SP|h@;J9g5tL2De`Byf ztuf$0?2z5lp<`X5K(?!HN!=X%C$TP6Vg5}xw%jiiSJ$!W-PfnUT-vK^mK+}C)b*B| zwXr9OMz~KmaiHTw_48Wf-UsoMSF|(#T>AS6KnT^v#^%y21h^rk8j3PuP*PrZRMYS!yl1tUJ}VsJ(l*NU`6_$>xu>!4MUvHUZ@P8O{d+Gbwi_cfcBBzAh}iv;Wz01f56FTMCZ-r zsg~3cQyIaPX!Zr)OxCxmx&GbMuHRPp&X@*vGfaMxPSSETk20h<&vgmVK97!9OELEf{L*8{$RQo&nCMwG*EFO5|X zyiuA`7gE=TT&%*D(M7Sz(V)Tqmh+%B$2%n)?RQi^_NMk70yoi!{=S=G#UwM>`JqY6 z#Myp-AFQYgvn586q}YeBN`Qlj+QDw}3qTpY6{s-3m0W4Q*5bZ_h*)($8kRG)t!TNCf@UG$BG$ zlTG0=LhF3hz%jJL8v8KU{xC{LBH(j97SS1;BELoSa~MU{I#E#L-$3Aqd8t_&;Xx9r z_|7QC(Lh6Wp(_*WK`yi7nzXF7!Qt8J2Ofms(qsv)YYsD;-+ zEz1J#1qsteZN1R#xAuo|&^x^speW@E+481mfJx{>+v=QlkNJ|1h}umbeL3QIH?P@# z4w94e!yf)Ajvjn{cMub~HDon{ue@Ys#;xajg8vVabHa@vx>G~JwF#JD%AFbFS>xxU zZwgahB{!qFt6E-+zmyAQ7s4?Y*JKhab>n0i<4qvg8+2VK91{&aP{%zWS#&2(|YQDbTIMq5ezC{4%e7u5{itiM;LHA80$W2+y6ZpCXN#IdZ)U&r-7E1Isxfc2X^gS 
zhW3N-0=&_SA|ovvXyN-=_X$6%7r7l-!KSY!zxHw!a)zeCh~aAgdUzs-Sfw>&x^^JI z5E6(EjLmg?5`J-InIv!`85Jb21g=n?rw9M*p?_+Yt40 zCkm{yj<~ja-2h}2>{E>;XV_T%=Dv)LflZg2k4co(2t(0aOQz7!T|PS8{a`fc7b|!> zHm|{g4BZf8@x3cY=)j8bR7I3W6nY?){0NNZk;N1V-G^|91YRZ!QuXS}gYP>hSsVNX zLiMc%EPe)5+8$p;_HZQr;1&m{c-zH=V<9$qAilO$Ri@qqynp04{CfC$EG=i%lN;K#@(?mA5od}F0iI!B(P;jQv8KI&(1{=sjU=UE zm-3I|ltu4S!{n0+Ev!1{ey39peB_3faw8ib^hS07!3L^Pfv0n_`$ta&QWdB|TeSwP zm^D77wO$5c!&pU~U*-O498SA$!06+H6sy7Br-Tv9R|gV>2YYcRC)>S+vW?1aeL4Cw zEYvjHwe_!o9@{iedrQ7!=WQ)22{JN0yw}?#%*~abnNIawhs~dqc46$%Is0|Gn^8)S zX13d(zwSw+2o!#m?4dKOgiuVYAMVY{&s(LZjdm-J?G<`f;D!@*!uQcu_+P$x zmKfpJ@`RL=LZdp}K-&+lFjw#}c+g1_@43?EAGgMh zJ&-=4M%tf@izwp_h?c@c$g(IsM7W@NoZZANeKzVT}XUISG3)2peDT8wW-LKx>G z0=pt5SV^h1|ISQD15fb5k#?I_g$_bV{a5QeSNzW=R+G$VXPUDC5bQ;n1=DS+Pf_jL zrwmcX)lRbLdKje~NRt`M_kh5_tsGgUVJV{sqkhSvr8-5ifU{_Z9OM;RB8z6Hus)<^ zdxt?n>~)b0l);)F$O?Rw*c5Ulx2J3r=W6I}QxN39dwfGX)4{mL?sl;W@waS6}(DcOLG^K;zF~0As4r;Q1o#fA+_B!o0J?oiA zKHrR>Lq>B3HPCwsR_+sJ8#ykf= zLqrS0+pY}^fwQxe3bfV?g#}=8^5Hs%8v@5{T#czt5`N!qde4ZdhN>WRRq!Ph2B`)| zMM82tHDrzz@=0(o=r0j+Bf3@vQpT7ciI6x0!LcKQXoxDZ1SSkldS240u9%IG*YeIB z>7(`03**vL1)D72taAQ~lc9PS;rrVM58LR3{wvqA6Q{DXp<(A!Z^2``<4;N}!nj_O zKBS2VSj33$-4*aV`@XdMga_Khh*n;t{!1vX7o}!KkbO1UUh{D5f4SI4$NFbW$Ixqc zxORW$+`XQSJUs-l;T4?{hTub#W(;xrzqPiHeTwNMgcTi%Tm`2j?y0$-oa;C7BP)wp zvKUf&x>V373KVGf0-ei6rRv;Gv=bJrFK8R}$4u1@dl9DN|(x#5cEu*;& z+7Lt1a9K&ekEZ@OPGAi|$>$cR81Q}Z#DeemtI4_SGL`dj!`+>>^vBQEfWqb#hx-S9 zbVj^@Mr9O8eh7lVXFdE_;gijN&W~Ny$1VH*5ZH|(rJzK~z{;GttoL*+jWl;_XM6Q?LW)BBUP!T78ylPNNiZhoh0#pPM5zhVk}xR~6u0+X z=j<$YbA8~V{C3_~8E&_`ft9Nsvlm(3G$qw`= zml~c{gEr{y5TlCZI$_Gu*HbMGyl+zti3R-Yl-Gjd5$&mx`cbr<8?^|Tyc~oQ)^SCX zN2|tkJem!nqZG5&k z-^bj>ThFOi!l_T00*C0aF&OCR(Vt^&3m{fywbQhN`{!juoc5%tmeDg)sdeQemyfAO z{>|DCMspsG>M%yy$&EY=1=BjVBb%=#8WDQB+-oZ*D_Xqk1cb;Na+BmXwI<$2NABn( zOVi?Pxp=?7>Q*4e*jA6(0JPDtu->pRyEh+w5k#g?7<|oZ@znXj zAf{|YiFBzHHZf$FXfmWP&EOmM0?1+|UE=;UxqNl{qSeoDP?K4o&Cy>?P5hZdLxqix z^eBvqCB=NH?=vi^9-Fl`8rR^8uFXrnkwjul)YU(?jgaV$(~Hk*d9rkexbj z_NV{j0vIe+dh|S}0Qk^c-w z2mjQFiYX}kTt*1PNSic2W)-! 
z3cca(=mo`n5xPEfY5J>SOOa4H=m%i&j0UUh7x zTQ#QX9usZacvacF7*eK(AnP$o3OgBwK$yGIdkY?Nf|8Hg(`!c zp{1pTTBl{xSo@;`s}iNVQf^hDdd3z)DyP3l_UQivMyr`2X=@zhPPeKjtWTH4SppY} zjZCI739L+g<`@K0-gZl;8ozk`_Gvyy_cCeZP(S#j^fx^sfSO_Hub0Qd@?!g)kS|pj$O(8r!?-s@N->v zQOgdsw0zUA$6oN770EzPt#*S>i3}JC?uQ*_nJ9jRvlC%O zR_9#Kq3!^GKuME!S|}EcCbS)ixP!^Ocb7U635)g;x&HM zuG?*)z)UGtV3KN>xuVD%uzrsSF?Ad78;w4^yY2|;rW0Oly_@KH1VlSup>`+Zs}}DJ zk}~^?n}a=Wxr`8bTD5-liMoDfT^*`qRFk215;uhrlWb3RL^(@5HjtP;@90#tEd=qj z9B5gNKxJ=C20!XX&ySDY4-XGu#jIg*MOesSOc^-lxv}o)*ZWMRX$-#fpjPP5wSl6g zS#@)DRh|l1RNH@prgXw#Tu%z$-5B0aVJPYm{I*C7O(HF3)2jbN_xdX9(_!`jOfJQY z)<52iA#`+f%G42R;=iZY*O=-#3<2Zk*?QYe{ZgC%!(O=vD!LLL&ctmg=)NPRFp z4Ycbx!isvr+4nJHme~|14m9%8d3?+ybPu1J@eN~k zdM=Dl1&hM>qqXmcv0s&?@3`GkjPK!IYS3Ckfzu>p4MZ)yEj~j8X!Mjcx$LCals!)Nek>UJ zJ(N-PDFRBku19WOp5M~#i*H9Q_9waC?%GIDxj+IJi`L-h!Ydr>l_MQ>MM)i7Hur1& z&c}y4;n}vcv8wLxqnTIX35q7W>yjAD?dFsAL~nIMPI!_vMhc<&NfF%9WN_jk6UP|- z)ferLhp|A+>9e{}mi_Cdd@bp9j%K4Ir9#G}NP9ljS=eqWr-S4spTowU2`6J;Bt3&_ zBXCCrt~+4Ay;-T{N(!Xu>KhM?lwcU_hxxau2%#nL5jMA99wotnTa|u?bN$2=j4*)b z+pu%~qoURBX~{Q zXG7wBW}o=;*SpnG_{`uRMsrShC^UMl9CeOQIfrBi0zla~i791rSKd$GVrgCjV=-W* z_d+hd?n%{5y@J1AbG_Fz8M&iZ^H)PZR$QNQ2vhg$*;>%yf`{G8<>Tt=*gZVzEj~+| z?PP|&{u#c<4rc`8Bwir*H&PzZYqD?Swn$h1E~&Ryr+$;gYWQ0JZ@bUxuQ{qdrNe8Z zyU?FIufDx4yl*#ZCv4qu%Q^61KANxH zUu^vO;->ea-d?NGrYF*TaBpwY{;-b9=7v>>VdS}sW*4IIidX$y(cJZGuYMFm-ihkr zkfYpS{JUZn*s@v&_KfTIp{Qd`&6aCGgUg}WuX`_vpxif)!l|p*PT3 za<@z=xvY`7sN}&lJ~{L7nIqytHrGQm4K7pg@s;q%5MX5OoDpMIwCq+R1SOz1Bu!n z@q#9k=@a`F*XgBs_-cwAA7QUn&O-Z07aJVfXhZ@}SCM`jiQ{{%?IImx$L+u<<6T-V zXO?Y`q((@;e{m8x-5}60+Fo~YQNJ?xbRq>+)m`L0J;+c+_|JiGYyaiqSj+QEF~kSIAnW2~i^ zWV(A)$6?GVpkAbY2Zy_rNn!-wM|Kg2BmErsfXg`>p= z?=#>T+*tK<3Sab{gS@o;a1hH^`Q7bQw&my6d&D3zd%0oy@oljT1T&{RLA-#-y2I|# zvI{)dL^Q|vK$em+%)d4%?y<{D%gZgUqmT9rLcBirzm&54J)Rz8+wL}DqnZ{VY(IfK zjVouzFg|qFt(1-3C^O8W<*Bo+{c(W|JzBlGF>Ls1=_-jepz23AMZ!h8NPex0D5hAT zj4cg8EyOvptZ`9j>R=sN8_5*7Kv=;H&AvnxVj>0}Ff#g9G52;jcze9?^2#{{d49Xg zdHG9gdDcZ6r%20To9;TusUvGui-J9#Oj8bG?SAZ4sOb_=OK#Jz{;j-R1s|HYx;v8CsM$ zRS_q7j)q^vwG`h1PmD|Y!LA?T3qL&^PWfM65jjHUmu8$d_n*5i|7v;RnZcpp3PW9@2+4O72beKnZ(l+wCAzBIXW9SMopu{9G6%|(pP_M8<0)A-r` z?Vonv=hyd(JY^Bj`u2>-3_iO;-eGj?13ds981l)1D>x}!=j90s=Qx9oIdpIZI(gVcv z;+6T>OqaBl=)m$H;N%gBBnlqJy5BAtYr0uvna{KCPaS>S=M0YXB{*){(*GfUUbiPJ zW`A9y>xZel|3UKB^8P0B{X4Txo7ejKz1=^9RCA0pTqY+AhPptl3&;1Zk*IN^-|FNy%i4_G(tWz)b#BT~c__vpL-_x4+eakSFZwz1hXolcm zK4~KuD?z7|+1CyW9LX1{;vc`?Eq33KzCDnBUhIXoqa=2T)lI|?{@3x`WJtXEkaGT! z4v!($9j*~>H<4xRgIEkc_j4KGE2fHndYpC-*ad7l4qFlu9or$L3g^o9(tQx25YDQ4^c2! 
[GIT binary patch literal data omitted — base85-encoded binary file content from the patch, not human-readable]
zPo_d&c_|e9g(?Uu@52X18XB6ogQ5BlZAM7t3NN>Kdr#a_va0&@cQV5Sl?N$dlsc`+dy z!OWnm<4H*0p@#s!as0OAsd0OK{RzA$JR4ltJgdd`lj_B^~oiAUX zwSrqokJxaT(ZrnO!UIosK&}2ki{ReXbAgTq`i`8(JPv3;8diVusua;_; zz1%1SzMFdCI5rwe{@7=bB!+1qfFQvj^yHBZh#5y5IgJKpW@euLnFiPRm!YAXR4yaA zk~h$qa}&w}MdGeR>emK;6wARe51_ExhK2<42V1*~U$T8CIY|agc`AjK+Sn^Fgt*LJkjwKYt13Lm}`wG=xyKxvyzO-+@JCDGE-l9iG12Loe}4sRb{*}4mrp52QV zGcz;ixK0nlCSEuI09IUV>*%<<%mjva2=EO)UMLp4#pVIl&CbFVL$dfaKMw|v-~x5^ z7$ekdDB}TCBJm(TDld^evfrpFRvZ7CjP5Sdi9VDe#dA+@IKkme4rly_%&oMbU zSt(oO!TtLig5>_9q0hecAc&USCh%#kkL2dQeJd}p51-7yfC3bMOwGHRp&N*W0L5KM z@)2>%s4JW`tg@9*7qq<}kaH!l6JYt8^7>MxTkxxa!`aMQsls#VX8mneqN9l~TR2fm zF?}mWjjjc)t=U~m-abBan=T&2^odw>-H?iSEC=o(CG?>_P@+$CKe$__eXGg={?0=s zc?P;ipdLOrSelm?vYRwXf%~p8B`s~1QG%F=$gs&Bn@+%Kz9pyO>ERg+R#i-HZ@SDt zqm=Q0DkM+syyy^ zxBvX5snZ?x=nrje#(AoiR#u+c1m*W$s`fNXqunSj9-ejFe;V3ajuZs~=ir^NwG16C z8Ll+2qV(mc>M;*FM+bSg{ISsyZca{dx8Jx4eTt3i)V45L+K#o11UgzMA&*KYzK^Tc z={tbKecZ{AP8@Y#@-0LjbF);6z3*tCy6@vDN`DE1!`$9dx9 zw6t6?pSz!^1y!o_%isg8M7UpiRmxjCJ<`y~*Dg)`tcdn9$7bcnHj!zVs-|XGW@cqU zK|xs=@!1TE+bL@|-XjUNkViF{y9jgQ2O_48p^Q|MfVi`WWpP$~prw7s{Ie82LU%HYZ*W`;jM`&0a{vLuQx;j5* zSZWi;9%XwPLJ;%bl5ZNKsG=gGI(>%H-rUxy$u}H*fyTq5w)edy)7>|8)=uqL>7pt(8`^;b3EviTk=g0?OkVR4f(774Ve{SOTa! zI8Z*_dtzs&Xs389ckg?9P>@`MHN}-UZK6CnN{BuP$Cv+23_q=V+@wb!d3U1~tS9W7 zgKkyoa#ZQs!*+?Wr4d0*8zja;LkU^@D3Ds9m}X^Z2|&$4^1Wnv_tmJ_*g>E!Zm+Eo zlaN?xgayH9MX6533Hbk>oPhA??zx8Jt#4K}t&#kKf}Wi((czPpj=+UduQ7bK?fQJ1 zEoBShD99tXpeMQL>Q2E2LX0D;pnx;?3W@q&k-*O~baI%}xSaD^Ku1Hvr4Zy`WDL6| zjO`%@A#3QZeH^p-sFY5h)Zb)_+ls9esWwV-tEA);FS*k0E?-^|%lja|d=yF&66QpC z#mr>3lmVL~>(lf+r@I^^gI!&rgKP5#-(Hua`bWdH8gU$U(P7GCn%ZPsG}_bi93B^H zEemkT{B?|3*U&(S_xPyyrtQ+zO(O}gMII~l-GYxFF{C~^P3;wmBKc2##4-e;9EkL3 zKnc9&8dCR5^9_D%4@hhS08n01(p0C$#K`#X>BeWRj!R1 z*?G#SBd01|8TzpW_S7j;9-efiR^ye<=J7n(#QCvYG~GhnNVHb@hcAcKS5k+iVJ5@bb+QgxwDz1WV^v<2l#?* zXkoEZmTnaR)2z=25hWJkE9BxY%t_JHc~{>vL+0cHbPLDfF7WK#g$+wjPtQT32(tP{ zQxS(EJTGi{wKX+WiPqM3IGuN*eKtIPq=&pt!^K7~x60QlY}okrH9V4(l$Q*P4o9D# zHjV(5vNIq>gc9jJE42Pw`-&%Dx|g>5wcl_dGRwu%aubhRagm*-Sm9D6vCj$X8B^pb zd$qB{rLa=l7|e60?HwH)3MA)V)WK1NwzVr>n6U7=LwDYgc~zKHUS_6&fx)Z^wk9*b z6AvlY4Tc9eID6T>#Q>0m*u~9l`&+G&it3*#u_Eda>D@P39HU#Cgs`C+mM30bP3_4b0apsX(-=hqCyC6<H4;|vES|lIxFN;T*fVL zv$Ka*4#4|hxBccCj2328bwfLml2Pa^>p%f&&iCx^8U#d0HsO2284m{Mi}bX#JgzU# zK;|8$WkNvsh~^w?(XY1Qs823F9?M16ou#cqR6-D(QeT}c}I$iVd%Z}qZpD` z;y5|bi-mZ43kL^9!CEl$DSii}Pls@W3%SsZ{O zh^kTpld7obe%R=wNl?J=Z|U{*_0WbzzeR#eC6b(#m6q1vxibSv%i`i9@Xu9p#K?-o zS~c(H<$)JFi0upv4?|wToWu&D?E8nO8imSrG)S>MzQ5D0;S&JjyH?n!3Y>lrvjyW^ zI5Z;?7r1mkW@Z#B4#Nnb*qH(rftp%kWaJY-Y``dH8Ca}t>rmeV9BaUl3M9Vawqeh} z!0Z2vF+_DVG@!9`cr>npPs-EL(*sZ~LAmt&b}V;WaLg;<6_8fG2hC6Dv6q2kdVxA* zJP6ibadE$mz>B&WNML=vy`e}bgA{BanH5hu{~7Z*G|#JRY7jzQ6sqKvUy^Bi+yqJQ5f|$5CxGUjkoI`WITK5ygJ%+H)hPx$N zrGUradHl}tUdHA-qzU{-CuIXp?r;}XrLjRn1Co2UE`)R%~Rax1d{v;N!KU2td?to^yi=SLunU*FlNxr&yief>_EJYGF zw544a+ng36^+Y)bTmYl7yM(Y@oFjV&AK`w*?8Tz{ZB&5CPq0icaG3LycrCnGhY#@M zzbALi;)HO+c*04s=$MtbJ?4x@t%G-oXqRhrIo98ZsA0UYvjamaxM63%e}6ym0vf%T zx5;6g3x8U}_2v8b?^{}0U@t)p2z>wm85-Y@Dt4A5=q#V(|Wbxcnh=gq->oK0c;frg9;Jj|jM{q^GABDrZ9) z{sarE6s0U7UOKuEnf<#L@01H?A+gZcpMj0JvAqrG0X*Xc@H_<`Jdl-@<w1eS9ySlo5P`3rL0vr(79GhlUJ3Bj& z-kDc%=v6bav2BIlGwvYWTWG(5?=mqlA+~?r3*v7)34Qte%Y-b-(9d6m<_mQ25ZhVI z$5lxw?&^)hLff3ZAi_`S#A+GJDWFvHa&nfve{T+Z0AKRb8GZ^3<01XB5fIdl1}Uwd z6BA}}_qdI5p9I1jHGnyP{@8~>3x5W}pq?ICQa|iY7Z(?}BB4h0dG_pfrEp|vqghwP zz`%g8+wZA_U(~R5{Z9UbP-DWU398tt=H~CA*41Q#do=v51U>&HPFJFwD{`Q&T&KUYwI$;A zTj_IGF~p)Qp{4EZW+o4n9dm0(jo2UO~YZdQX2)^TD6YeX#>Mss9Mb`_mPsa&o<}=gYJap$Dto zai;buTMD&g;4f&AnpgP)egoM9xLK5xY{5ZgzIP3@2=Fn3E##&vAR#L12Qh`1m{?Cw 
z599!unXRBB1@(NDPGP*<3Y<(h*^hmHY`Vkd_|@vyU|a#6{(k@fJ$Xfm&F=cdmj>mc zgUFmG9XIUGbJ?%K=~*Q4iQKM3tZYLABhdADN=h;)7d4J*W%FzBky+`T6{AS*gpB%) z>u~lF6OB@ypXTrM-f{wp*4GiquqV>y++XJuS$oTEsS_7}nTY-}a<+t+;boqdIE&OE` z^e2A_)6*{le@)W&sA$NcFP;LEg*0C7+$H8c*aHQ(}TwcBlM<6OL zj!{xpP7VS5z{S@<^8!u~NHN~MckjuDMBLB5zIDV<4?NAz2>$Kc&lD79;JyYbA{Z1{ zT3RaFu_MCtP&z*}Q!y|x*VooK^{Uar7^|aJdgH-BG*`Qn=~N(d2w;?x)6<05*w8aN zLKtUhyE>We?v^=!5fK)K7#;=KC8F*E=%GBlmbS)$XU`Mm1zcC<|E*7y>Oj}BbL@qM zz`^M$2yj3J!mdk*Pywi}=8G&PyLa#2ojV|SYZW>8Ox3`k89r6GOukD;e^B3qyQ9*y z6Qx*lYj^j0LdPWWaByG%kO>BQddNGi1jzf@q3eXWnt1f1WtKNLu^(oM`*})BhmP6! z!)2`c#6eIfo+ttub{LxYy`<7!MNgBM1K`?$dN;!DpZ_&{)HpaeJiUNmfldiBGBS`f zNv>qI%MDoxurTl_Kf^(WR~ToBA6$dxYK2KVwLMd%I+1LdN&Ec5f~0_e00RT6g?*hd zO8~(%q9jB^LxTc;ZGGJ$P9;m|@dpte$)p3|J-l>G;GM6rR)MY-@DjHsN*&tje*E}> z;0J$wP1hZT5d`Ic`1wXY7_UUE-=!4wgc-J;zP?;xH!GO;Qhl5J=j`p6Zz zIul=M@%L~9Exd|sCGO>UUs_wz(de686-zrFL5sL5Vhh4vW}(i@zQ_E#EA82|;6gwr z02MZvGOb$+3JCZEwG43AhJ>-V0=D0Qf<87s@8|~nREUbX)P8?>DYz(!iI0Jz4Pz)m z07o9f|7SLQLtqV1;1ftQ0A3PopyIc}&4N^GG{s8{AkZvfH!L$cjzF*_g3M}Fpich^ zmibwZ7!3B{iZU^+fvRMy=MMQ*J$XDcBjX2XLT*o1EWojWTVul>YA8#9wIJXC4moX| zTku4%f9)@%aakfB1A#>lWgTsjOqK&X~1zax>6kwM~_BN1>8Y``Xv zPrNHfL{6?CCmML^9i|H9CmiTh4!z~QJr#MNv8X(KIt^Rb&5i%69um7+pcH_a4Aj6p z@wXs@fy^V~n`EcxUP}$+2i|?zqF&2*)fL*Ma3uFuR`{J4AH`L5nhHUP*3~7=9bqZp z1w~@)O$j*Hz`+D+AUeup$XVc4z9NFmjhll*i7O3Se9gm?;A!s!m*$TmwWB-GMF z{~OG3a?JWsIU8pgh$%qAzFyly1gZ_-h>zLXjlAVWWt;*G*hZ(ycAWVN?lkR zpgF!A(ts0T!;KmQ(-~%FW{|1d+S;D*Bg(rJ1)vl|M#$3(blUynMBm!(arU%9aOT6h z<-0aO3~D!kqidCFFG=2Ng}lrc>`34UV<CL*8z}}xc+BB-+Cl=-L1*W{_z_r1&?5ZK^6ImN}$-CG`<^2Cx2s2>kU)h zx!CzNWH~_AEa{1;?~*L;=@_adW@i&qJ%|V=Ku@*82;cXxV>?+NGQv z94%1MTr+KVxM)V+ec;za`PXG5z($0#@fvL&l{sm1Yl|&W zUSD6IcAlGu=k<^k1#ScoDB#){SVv6|hIIGw609JIBe)p(b#)UXBMIdcP=LY+AzwOV)X<)s?!UQMxM@54P0?yB z3Fr*;a}UohYinyE-!yFTt|};yld;J4ZgRuY}y zJ5BJg)y8Ao0QfK>@rtmP$y+@1!bf@8A|>55z+QS-k@AX)YH*1@?k(;SOByX|-YW@` zfX}k8HDjR`GlPi_RT^mZqci@7ItFfOYEjQm@7{G-@*zlvM|J@I-asaL=RHtd7v|@4 zQd6~cb^pOYj*N^9hvDZ+urPyU)1bleLAnxPi@vHP^25R`qsUBw^*1PI50BuFkR_LNUy0(_+<>24LLx2t+-6D>bA$T3f z8zBkB>;g<_LvPa0$0wdzJfmv~8dFC{M^Gz)>w-k?4OEPYv-P#Lkj1=JVtzLtwhE(D z`}qkHFVVAsq6D{`mevG75RhFGrYmV17~HZ)_jnxuO>O8A=;&as33`he!>}oQ`T2>8 zA5RVq#SdA*MGL8WO$sZX`~e#b z1pHkagu(#ynW%US8x55&Cn)jyz&2G&3(9V27B9JCub)E}3qrWoeCf=}S#PtlC`n17 z$mv0p@GLBapFY{cviM6#($WUMzyD7Jsz>O=I5=>|_Tb>+5@Cls-(=u9H$pSj5a_V$ zhY;9qW1z~5O63tS63V}!r=1W_<23L*Se8vucwqJIZB$g$moHyHf0OG*+ru?Vkv(U*ew_h`}%`2wLL} zWo4X<|9#QXAzyYt3UUEMO^ON%0QMQJ!~D1EUkgB}pe%+154QmTEl};kwu1dBn65-k zO${}}=Om_G2~{HV>pP~hbQp3`wzBqugjx^u6%-YGIE-byiz*&gg!a7K)jDW0=xRgP z<!np>ier!ucjC z3BU}XJz?|_r%2<-)~RV}h)^nr z<>T$$0_Z;!QC2nS?~6)GO29lA(EUG<`GrSPLpS3~7&cbk)YKFVeBHH%?)(+v$sB(P zg+XI06c|C$)X2yk?;iK`_9EP*ApNkaAv3q=@LdPC{F*DvvkZ3#xUI&3>~fj3;r?NS z8?=|}v+wbXsE0YwP=K~=|19*=KIvA$+`nQP7bhntQ`(1uf)MJBrKPdhZ-j|%|M75qRJFh@7tVSNqS{Dtj<^=hof9wI4`OGSMX z>-`5gKS41C3CRhhxBv^rRjC*M0#{lHY^dwNe1!rqu?+`t*@Gbhq{+fXliM(Z4#PY6 zhc$kujxYnQUK~~C0{^-y`WA$|+5<{BA0ez)P(bKBuzgbIQD_31AHatqJ|YX03%ZSCzr{{ENY zx9*u%v9Ytm!3mhM1<8zp3nsWk;t1X-fGg#+KkNQ;xE(_=NiBfPtgmkXQz?29xxkT7 zp>OI>d&LXB=zLRibhRO-0KzRMWWWjmrm=X~M{522c^RzHM_uEQVG;}gNTBtkF z_#IH`;*^AEP;O$?J89HM_3+Ej=9oTJQUe}A)0Umd*yvM^rl6%8<>zd;@c)>p7hfT9 z#8r`5Xyjsk=64@Mg;DM$7~C>nEHAtj_A_rR}|%KHZi zdN^qMqd}G8tEB}aQI>o_{&0YT;O}ZbyY?rfzGE=TqT68tm>crYurE5AnlNF?Lr;%F zm;CzmBYd3FiVDnw(2;c*4hGSR8l4K`g8L=K#hZWyg;44PZwVd}ARQVp@7M3|0|j{X z>mAcmc#cL`h+VAsPEu$wlYJfmQh`dg2zr$oku34OdvG(&KH30`N;QiXj!AV<+hEf6uut+2|He>YFA4x5~N&+qQ zNT{x)gzdGQxw-kza-&->VGKtIW@_MkKO)^fIk|>O^8afGkuH&!^FlE2$r$YBMo8 
z3Fv%%y&%1=dEC=KDbQ90bgIE=pwhY&2gHZFEGbix*(eHSOM2QQ~zb^>;4e-93# zV`H0rkJ(zt&5B7^pt|_H4yqb*?Xxh&Yc_8+k`wS}igzj(D&V4`4Mb}+^&BvWN6E;} zOHkjwMCYWX?UIQRs)A;&31+i(&7rD6czvR|p*9&9g_pZH<-jG{x_R86RsKnM_ zeyw;;v^YE}i|#$&;)6v8IzT{{n7#ZkxlK0)>aM~(!B8?2;UQ=^*AZ~XF?Nqd1m3w^ zxVfS!N`sS&diC!%v8h!3D=_JkF@ucqHLCOzM@OzA0gTX))n0$7HcuY@s1`I2Is5z0w1mLfdB1G|Aq>bzC=n0MyQA23%18I=p#~?o+N2xeEGwhMfKtuqvva2v z(u+Fs7Be+9wIimlgM*>=TL40=4W`W|v4wW>*nVdWd9(;cvoJI zNBK?UUvGK8Y;XTEUjxn2UK8x{)i_z6SEt9#(M%FUe+=-(4VT-o~yrXVM$4Hps<%W@7+!d)u?VW{6x&Z@R${aMf4m>;yyn4RrL+p-&f#0AG9iWh&~R z=U0>8dMnAp=VKYqFt_WbqIkoHNHf2#y5H~ke>8n}Jk|aG{;^MpWA8#%NcNt`E(w{X z?7hWJlD!F$mF$s3vNEz)X7(m~rR*dbzvub<9^dTg!TKNWsjFixUqW1&`P7h*Kt4oHK*Fd-L$yAb6^Ft$5S2KA zF%&l06W{>dQ`&|`D!>gIwNa`?s3W0D=&=-xn8_pPX+EBD*h4<|)78_%t6Km@H)5V7 zRsdiSv4%cC+|_LSk5Aytv{xY*$=L!UggwN^harLo$rrO0Toxlug_Ki1+G7GCY2POqiy>Soqn6*4dM>y4=wCuxjaPY{!Y2wjfX> zQBhHR#=u>`qwOI1qc}_K zP*yCvdo9ku*w`1)3g{yL5xg1zGOPwiq7o}h`rD>Z1f{ZD-5jJ-P<+s_;ErMg@t8ca z!%|S;iNK3`aYhnlZ2*G+V!VypLIJ2b*=ReIB0YI#FO*qdwR>ssL5PFTy(<1Xg93>QumS!l;xj`?{Fg6M z`KBv_>>8%u) zvw#hKJ4@vX^tOKv4~6Jo0+R6h8zG#_iL{gyg4?txlziEBb?<4Yz)jiUJH}rLLIt7E zkvR~&fqZJhkx2Gqb!7$TXaHCqk(`+UEj%L;Tu}dcOXhnb;4f-z{|`*%(EFTiA}F+A z8n1)|opA+JYRXK9F4Ygs;vsqZfR^hD1)B!YVcIcHeJluoO&t z!CnbMsO$|12?;G8B2!^{K;6Jz46qZJA^&J_d<2>)AQQo$*WF$4t@?jI!LuMhA@z^PLFlvG$TpwFL|9fLT9zCDyY1sY5?U6L13jSPN8)oEdh{6=X|QI z^@cAb>Vj(^b8)ib>hI0CO2!-lVEake&#w!mEkougmJqHG0rPTxb0e|}R*$ti+HP)< zUzFaUXgDbn2&3m?pY$}+Mg+zak-nt8wz0OQr>o_=KDL{RrWNAq|HK?4#g-(?bds!X zXox(++{*l8{FFX+B^v-c7XRsRQ9`muvOv*Ud>4!+QfmVD0ZL_fD6jP2A_aU0M^PA0 z00$2~e{?61<2G^IGWn-*@WTY~zpwA?X!fp2lvu$ink9!-K;V@wR!$ZjB_%FP2h2x8 zezS&TZzHA@M9Bpgw!sva*lS$U!MxpmW8Z_^fwj zKUJe}+@8I&JRN8^xL^gEqKWJ#ZqEpeu!t$u=V=hn2s4ja5*AIrN|km!*xVhu*=uI4&t*?Y?b;QFE=+kd;0`6wvZPIW(WvTz8uF+-oJesAzioQH;|E$ z!KB|&R0^hk-V&+pOJNx|TR0Nc^MCes6`IaRJsE>Z!}~JQt0BgJg^M(v-f6Sa`4tiE(lo z@RG-BXzV3E-P|gvwW)R9#V2BkMBRHMEXyXGUtG+=^FiJzV9V-82yJ&;0O4$Eq>shb zSIqlDpvStut-XCjR*5~Es224P&)A)pxfaUtg!oqbAix5PoGdMluS%l38@l$Jf1$y6Eh`{x8^`|0r66|LrM4vH7zYt z`eRd5=4QEkFA8%e=00`UcP+WL@ZFy(AGa?s_!ry^@2>pbp}MxxQd2NJoq>nIt!xcV+8x1B?#>^O_p5{P-zmV8ZdGKvZFvPhBk=Ax$uTfFGts1f^WNLflT<--NqUu#Tngta<7lfE{8qS=uOT95eAF?$gW z4r!W_E|LLUfSqYA;-#IQT%RLdj%_eGE!$fb4Q=Y{>jS=_jEoF$y3A^(Jh(}&TnTL1 zg-cWK$zTC;)h!t#(?=E4j*v+y2@7;?vV9rm9J+X1k3(xz_Vf`x4hwZ?s&xqc_~z>S zN)X2SbuW_bD;iGz*0Cn#+H8yu)JB3aVuyT@iEeFkjhC}uYXQ-gkWI{I(~wkN{5IMp zlJ^A$+L?& zBE!J(a(>b`ZYUP$1pfDHeZ5eZ8=g#J4;vFRqbPw_L~=4Us#b!LM9D4z`Kq=hT5czK|E>s2K>s@fYJ%3$sd7un*VXf zgL|pebV3bZ&^gRC*!G*t%h>tUcQl4ov8~ncgNKcTWPtgSrZF)$r(r|hQLgd&zcyxI zmk-=Pe3~l?ev(+vq`^@McIYq=`goPR@&zbO6xlh2g(+8KKp^xwIk~H&1KK+-;UE+} z%ot=wD-`tfl6ox7E2lrHSk{@DgRI=cQIP)SQ3W3}#ZXUUdipFln;07#AMeb60Rar) zoj^pE2HI6L^{s=wy=<>?)|A;P+w7kd=>r^B3pPc)B~24xVf27`t9QW7853w!J1qs_ z)o=aMh!Cl1Y-Bo8my{HmP1G4V{u0`^me7KA+9JQU?p}99zUkpA&PYrA$_R?T=Bq5Y z34G_<*<6Hdei3XQ-hu&d3jRq2uFZH8y~BU9X}80csDF3{<9~rwnXt0^E?R5~yVM(smy- zPeoWmTAqT;yr*yGQGX;t@!HkLYp_G+N>TZ)43Pst!6#&d?g~6O@YtxnqJFL!mD^&V zUi1-2*Nm_G)<|CFFkfMQ)77`+dgLPmo+F2UkEtKg=J!~-(}vQ(^}2TLiHS)TOdgVw z@Z1Ps8g7{cy_e+ea4<~>%p;5D)*z~j+B_B&L@)b+6P|)kw#k^zaQiXu|Qf%VxrtR~a zn~$1>U~s8cFap#51!zQ z_!S5Yw0o$RmMExDOug}gv0mGsYQb-6#A?37DTwG>3!1=SMtsrNF78|~`@T}D1xX_% zVQa)DEc8ga1Em|pm*<_d7a-^3zvaQhE3L*wnLvj_MKWyspD6P~$t_vZuYox|Tm~pW`Ta&TQrai-MDSzr~1ys-2g5 zl&R9v6v+Y>gF{!^;b^S^Q`7Lajy`?iQ19jPO~sq_4=YQ)eX1q95^LwG4GwAAucX>J=! 
zcpfNeX)U&KZv5+!&?h1%^!7i#{zo)(s;!3*H*5o8i$X`5zT+VpO$O2H+lyI^6jiVbOknxanA)Qmc7jV*hwV>u~!-Dp0 z+#YY_p4cqb`;bB^XlQ-cJx*@|ax^d@`XZhZZ7AT zXcVcBFUV=x0t%EVp2vfM>;VQGe^?HZ6j#7ND4at6zn~nLP6xM|=6%Cbt12CEZ_zyb z{X0X*h9sr$*6rKjQ#|n9U{?qrz+9ZpatMY1f;MpI@b;ckd$C8IeRuY#jKkWtp2-qG zbl_Tn^SYCxV@Z!?z0dC-FolG9`(GH)OnWn%&Q9I6LR3x@Z^>n3AiM%J>`p;Pheb8a z_yqk&edufZnmZ&M#lNb|sDOc)Fe0He4KRUQbDy;;m|Mjh(p0Q?|Cyol_59POQw&tl|)io$PJpv5rP5SaO!PV&K zz!!`SOPm2+AkO*oIv}D zp^Csz0kB&+Xk`S``fY`0zUFFCDksEev_L_2XA!%(YRl(7D28B&3hoabE~9j`T43xv z4lHDtH<=3zUp1xuMzDw?=_FemsLeA8l7U*^Rtuo_47;3TX8X#Q{8+cuHUy5evHpcD zTdpEeT8oC=e26KP)}|a?rBh*X;wCf_5L;zcRVe4WWgc)R!4wpk16{=k*Wi^!#-uyS zRW~Ie*l(J8%vOSm;!*7Ywpomo;50avQ~{F*<|}`Jct%1(!pFr0c1HsMo^9E<*UiCk z2>-HNw&~7qfHHBqbbw@kPZhW5;3a0f0*Pv884<8Y=&7{%RUsw$P5}u{f48cFzrs*e z&d=iZRa4e!-ynfYdFDH(%GUMw@98Sls)_n-L0T52xBKr>4`oAZDOw1W6R4OD zM4)N$!S1kSmU_VLHCh{)JL4n9yML-qRNa6ruK*9>?Hya$d0 z2p*P!wgaPm@c613I!7W(UgmOHaJC~L!emq7;o&LS6lNiCK|=(35%cI8njsfDkm|S! zWLJeOlZl!MrY*W8+_xmHFm|ReAB9ZNpK+}eCnkO81KX{qV5_tBz7LoFWte!8 z_E!~mv}UB;hzcHSW&p6E0DT0zF@7gqH*_&F)79Z?o-0m!r zz99LlBoRuLbpF6mhvt`rG`u}el`_cop7!g;5X6G4w+|T_6zHgwaLF8@#w)ZeVLdUo zkOHY{A5_zf3>dbPKtE}rLQF)I%ug3lVJ;dzF7&nlWX4DZZfYDBe6-G$I~ozuOsI@! zYX{{N@A|l_$2@oXepae@;h#v;5{DHWz9rO&fvH)fCRb!`(Xg%Bu81j%F&_;dRvcd) zt{_W~oIuytxa$Nnr;EX)O|VT4P$>ZML1qh*JatV?I8Z|}5ypNGviOvWVbZ!mnOsy2 zL*;DxS6Pd-4@>c{E6xH`?ZIInrwN-J*_e&($(zA&>6NFBNzOrs3J` z(0Bj#K{nTr*&FdDtKe$bmWc?==uo zZz{h)7Jv8vz!Qk(o15k8NV}Iio{p#SXQZbS>EQt19Qf4tV}Y6i^RvlGF=l2{FshOl z7%IKf&zu4B)lau6rqZ?G86EjFg3G=*X~C;h(f{KDK$3V6|`eeuYGAsdFHHKUa^yUe0F}y(?4Y1X77!gTc-`MxodOp3x6hScl z*ZQ+SEX9;TVSu_UplxTPz~{lm4j3?iI%_w!i52v-D0gXj|l3zDPe=D(^ z-FdG&LwS|m3!?QWmn}Z?idz|Z;lGBDI4wH(wKbvwd5JwveZRkI4;TJb+S9Y5%0Xhy zYWX?!l3&J4E_lm{kiVOa_<)pJ<;s4mP~m-wC|rXw>&ySnbfZsw$aTK{aftD8f?i`|W2?l_ zVhKmT#^bwvn_xV}7|*w8+mW_dB`G->h-LkSL)Zw~QlR2#9fFKqXC>+*PJUrwP^$h*jyB_mjZ+iet5+Hg3m*VG_5h+kd;2soDZs$y1 zKd*!2HDvr9xPzH@!6QJ4l`Mk*3S&6vfFIS=)Fk4u+u6Z^fmyw4yz}!ge{i_|7fDVS z3Pq4oro4ZqD=p5^6~q#8F%1U?2bw60ft7_tp+E;P>{m>8*}?!`1@TAfqfACSn4f0ERp=_SU0bGu)O5=}28`RsUZc)^c|aPKa?6sw;rGnmTOf3qvm z_kHa2`Ag4yD2XB<>%26cBm&#MhQ1DXlHA^5Ly**OHm$Tr^mt@Ex!1lm; ztW>|q@#oT#!hJq=e4{p{4)c9DiO}8y^G|`Spxsgs_`&i6PwHQeTww`)_MxPN%{Vne zeU$`=h5xgo@CL#Z9I5f`-=GT`BAAkZK8cM7IvsBXC7});$|%r+wQ2tM5i$CVi>-qt z1dj7}-@|)FT=N^F3aEGcc7JNz%#k84v!9F?%1C#kBjYbj#llq%uDK-09zq+i2fp; z!O^ym=g3u2Ar#_OK)@GnxiJb;EO~6y9?Fo}RIu%nnaujPv{LTgFi!?C$EmerJl#)6 zii;78kJ9H9cnG}ELYCbqVuq)uM_VusUd&x^6kTzb0N-<%(BfmtnSEL`A|TKeSjU*h z{q_j-KdyQE7!zf1AqU3((r=&Pi=5A&lZ8BWpLo_8gIY)d-l>j`POXmNxWipl0@54lYe)DN0nt6_p%TBejEf}{jw)7Xa9Cv z4Q5FF0oQo?bHlKo>MOPfsniib@&vva;vc`LD0O(SKv=!X!yD{fsh#HgK8-+Bm#zr< ztUX0qDp#?19>&|d=8mzThCPLq|CQN;jMw&t3?RTUfq{!N3Bc?a37ar%^!W{L&9M*h zmr7{<)WNqYF>u)F6LMe}uW>Q)uy6F-)|XboMtm5LYHYM0FNW8NXaNsrSCZjOYo%>M z~{6v!QypociW&Y|Ek8AYZAjlm5%1ZfU*p}7hnl$@uI-LMlzz|*?2QYrjmwZK6EQWouH#z~11@Evd zn1x+)mzi^U0U8WA&R7U|%;-k*qmMy^RS*Am*b0M^m(G9#gf^;QjFs%{wlFr>VC;e= zuk>^w;=KKF%U!h4+ zb8wR`U=nR%*xDy4#TEk63Nz3F+IkJpScBlW)Bb5w4Cqk7gb7F+Z&MW+VQWRB8f9lh zt2nTUfL#Z2wNBcVFh9_vef`}7GxKWgh}w!Hj57PJcx&`2Y!XS~HEKJz2;EZ{!`}0j zo?GQN&$7K_6tqO{+};HQ4>n!*=|Vn!qVvWb&G0LAHTv5J>i3;AdU) z-RDC?!`bIq=WV!VeQRKw-;IecptOwcbpOiaWojy>1dr(7@9i3(P=e-5NpBq9GzOY- zu6YjN42BN~f(mQvbvW-F_uhnjPm()X4DRpmhj06#xZZib9+;%a0$yp75g+AP@n%&e zP#(g_^`Vy5ClK2KR_Y-MdX%+4|A6yb4vf8*3}QfZfR7)F;0O67=!YEv_l2&~qt3Wy z&b<%#P;BZmpsfa#Q}D(tVCZ9~=cHM6GlakT6EZVpfhGt{^DH32fNRMl|u4-XFD%I;&9Gx+`c1z86WEclli z(EDg5@y$u-qtQY0&akWCQMJ9Ey*<#Wq2=u+hysDNOkuWMZKJDO>>X4kWWb0F=xMcdjh zD!r=w5b#!JFOuAy^Rj0QQ=mKZaC5N6gD7u&-z 
zhCd5rT`&+>02mmUW^mM{(+5QTFV5g@I)QE+SYSY&0hKlwlLZWJ%)*=u)Ei)RYavVz zKlGzar@bnJDPnW{!z4KP93CEGOvAv;!2|*!{O^!$`FMDMn>#2AR~Xnu*uEC%mEVZ{K#{y#N^)q#ZtRd4O!~1Dn@DCl9V-Ev9w1 zQo#2$3L~V4mjX+zue)tG@hKU9UKIzCEe-`Ij>L;6b8u=CjdFGZ|$BD?gT^sA&uvzWhg@>?{XOgthPARzw70 zHaRMdTjD3b%?>Yqm<4#4Y;Am)f9Xl__3GW&W;6}Sf=l*8O(@P_&Irg+?S{{n`5pH< z=f>aqlv<$LqrUa9gH{SQd%+?;kY^S`*lJ!d0R-3B7$YQr3o-Ck;(-*)@z&@DY#Vj+ z@i_&hEEu%9f+Z(d+a>5kO|(HY`Z@j~g&7GPZzE7wV=4kLEdhltI87Bmp9yH8=DVlx zz#;yE&78b4883CCJdxROyF?X>-pJ@E&8>$Xpu2#d2-ws%m^v4|)`2(s=#dmCzIS@~ zK@ypz}T_uQNT>VXTl0J#p-U*JvUa71zdK}aJ_umEoSNkpq@s+|7?{I_^( z(2NB{eVv(59?kqQS9b^wSUku!2py5>H=mT%z#+@ew}gWP)!U_q-lkRk6`p_67H7JK0x632OEeWonlPk^d^CC-s2EdiitPvq;d@)ONDpU1Xcym zyz1+Xuk5QOcx;!}ywL!3PgGA|KkUO&*f~1FKokOPBZzbATK&5N z2!mWfoi#o-)&S-ia7JM7R~ZOJ^*qu+W&&3PQ%YiDY^JnCzV@vn{NY8ydLDYDe3iF1 zWp-D=&b+_ve0}M-NcD}b@2-76>oO!~I$IuaaS%oI(%!%q$T#fu3mCb*(fp>Gf+Zfs z(_j1SO%|b4&4%;*=MN|WLB{V()Om@^Hta45a>Km3L?MvK>&pARN} zITHI+AW}R(+mNGCc7tb#*-i7==@F<41n@P0Tm&Tt2&r6q!Ikp_g3$275>&O-aAkot zVw4OTR~{Z7s3jx-(T50UusxOamC5!tn5*nV>^#NXW>8ze9VUN#{=D%?6(JpH^MUVQ znpF6Z6p|4rxWUvAUMI$$2RPH^&?wNv0Yw40?@OQ)mXK=K5<>*c7&gR1Z4O(G`*#1p zT;CIHnIPod2+=h#NJ&nHes=&K&J(B|JUvA=+HQG$gM;nP~tgqXI(*D05EfxYknsXQJj}E~u_oq)?n$Q0${UaCNF5tS@0_P-@GWuIQEyg(q z>e3qkVyd5AdjEEE>6{ye0oH1iVT%qUE*Se z!gS}}zP@(psN54@*`G_Wtc#GL8jAcqN&YSnCO`?k07n>#id2Em!2B0xQ^dR_2+W5* zjj46zuC4t<}`jrYfw6nxmgI(OJ|g!C#26;!4OZE@q*M4FhyT zBVBaBx{_tE<>d%O6mZhe6~BaTr#;AHZQr{@-vRVaFoJz{1uCHnC{x2?<@bcJ0-$$m zgb@;0@UfJ^a0U{<+H2S{1eLpAgYr#?P9F!X+m%W0kI6pK(FuCI-wcBPnIn++Fd|^{ zS=Q_S>BDm1qZ?jDz<2nX1$(4z&p$yL_?97cV-`OE4mBq2Sj6N!{y6X4?7eT^eFF?D zTYq0|$_4V|*@OAIr|)3<-|R0;1*e#)qvr|5GDswbcKRU6XuJ`&g9xxh1%+-Le7}dK z(1egQ|AVFCUCuSIwi7yTPV+C=jWlKlv6)HlV|rTsxeIT`^u#=)`(QBjvFsb4&uUMm z;=B37?n;Sw^PVwy8)Uv(dU{3$4-4C)^{5KDH5z?Ino_iaFS+tGlIRZk|67gOp1gnI z(0%gU=KJ~P*AMP@WX9T4+i0!0udYsH7sUR{JT&R(esu$C)J<1$*O$&Uv-?$?0s^pV zRx_1fK}wqu>Q7kZ*)o{;6v(_K;Ja`jkto2?CukE24J33(UgqYk3nJgTyHDWJJl~m< zpnj`-vj79QJwMtEw1RyS&_hC?dp8>C!w2A5#piF(oqpw4gE3yIoyFtF^B80f5)L$t zhg;i#BX2Btd)~WxQB+n2A*5V%1mo}lB^mIK%QDX9aQ~oOQb< z3srLWE$1Z695GA|diyR2mbUGba8KdVUZrX?dYYMh zwQ497PGb{w>An2AA^`y$o`3ar7u;@|LsL0ZCy97zxGynf^5EEoOKR}yr`!EY%nCId zGuk0C|I4lW{y&Cta5!@ybRM{4f|c=d*zLiEiHqE;HELqe_a{ykFR@ISl9%WWlXu}_QY>6Mza+To3w<2xb~+4 zA$<#>45_bbslCLgm56%KGjB>6mAF`SwZN#l`rU zo?jlnEj5ubUe7+8C79r-EgKZmW}}kFGexL2^Zj$3_Qz}ueSfzmP3T>Jrig&73VD#G zbmV7|*Mg{s?hI=z1ga2~-Ga+ZnJSN~B2S#j5@K@M*FvkP$8+bV=Ihq21aejsnxB^? zNDkne51`cnJGAZX0qds_PoNNwc%x?k+=k%h%4{46Mh*#85MiKw%1K_r#jJdIHQD z-@bWMEb@C5Zz2nG5PbAl^3zs{=<1EH6{lE*&VF=N_h(82^9{k`9f#jo*0_Y96p0|LaQrwJKN20{? z^Lb}4(7Y)thq9>yBMC~b)Esa=vK3Appx&8N)21YTPg8y!0eZX7B_$0xIpA8cY#dW+ z8Utp?F;$q&!j0$0a^i3*U78sJr$UH{jDr?W};CH1?J1)5> zrSs_N5r-8<3>9f(Ryu+ku(LKy%Ag<_3%cQ)f88h@WB-Fpm#Gc~t*! 
z71tV$_Ky)ChCi$a&G+PFx<(-=Xqu+ojOwB3;)0h{TB=;nB<<~*zDYd}a_LP^eH4s% zqW^1)b7nfpnwe(PV(6u{$hXVP$v#m)e_+W)>}#WNUtx_sH*N3wHO(V;o97mjsN)I9 zJQyVNKi~!;ggggH0)xCEqw*7>V-rRUCEn*XHlg#l$jZZ5WkjMg3KWPuQ7L7b#}i<*ek zU+w1azrICvQL-VA@@8|n?pRqBi~{>|J}UQU+bg5hbd_vf(LMJArw1h*J@|*-skgMa zSdbQW=JnsoWw8woW?@s(My9FLT3n0!y88zUB!p44F)hFTTM>>Pj)d0+y1pCSzB$fh z?uM&IWZOoNHO&Nd^*a6FK!(KIn*DmbgFkdYlrzoyW8ViF3UK2G0KD7WX@hx0deag-)4`8$QRKz&wHl-~6`8TYn3c?S$JL9oUgBuRE zwtIhlL1zBYN!%!TKV)vnbx~Fsz6#vvu<#*!_K*OmPRF6thei&*hNYz?*iyus>Y5q% zhLsQu;g?oakf%)7T6eYo$7qg_Pau&C{NpVR!yCes7^xJoi&i;QLS;olAfFfi|Vbjs6E(*B14Iob(3er3NQbg>nvD`y{N4 zNUr?M51rgro}lvU+cLlZXD(S-irMXxDJel+oVYqb~{VBd!fq0!6TjY~lt|i6-Ecrt1y$ zM|6}Xc`t9*<6N~vJ8UNVi!X#YKa;ECAL7+GVmYH4n8LPcr$huG1#whx2^R^9igN&F zRShR$1l<^1&y&YB3zCcVB)9?ZP09V^1)a9ZUPxq z5b?|eEM|V0d{PVMu3v0<(qI&F3VW5o52qOlfCcZvd#ZbgOTqwb2^&js!X-{zV?pBE zfulA=XC(g2^I_Kqvy-mVM7Gx%c03g5;(t6FenwI2*glL4UbzQ3O9D6Ywk%e-vBjF1 z2_3qW&I-L*)M{*7#t-hO->o5A_-OTq#ng#1RyTTF zX{t`j=FhjekB{rYOzgkj)hk;!3)e?n9+WrmAGA{hU{zVS1k^9KtQj83JuCinTsb)D zTkgbAVA&E7IO_67+Rxa(l4Y^|0d8-q2{SY9o#i$C0avfGi7o9-O?kY-*0NX_HRQLufcASd_W%)`;*@)Yr)FR{`)p^&$EBW5pV-iT*PY*<_dJ8@Ub*Rfqk zuk6+21$quhv_>Ix4JI7fDFU9zEW0$n(2whP=F$JyC+AnVsngRly5jsswEVdN16DRO zvV%CA-xOP`Ej(P?l>8drN>XkUbyc^i_m0C!uPy=$iGzCh?=>^pbU`*1O^3|C85&ZZ zv0;PIz$4Tp4{dS#f@F=<;!$~%M3-Y3wf1U{9=~eup`)_Hf$?YD>{r{*re$qv71={R z-z|vYN9U_~=4A_VM6!A0+iJNf5Z~nt#PdHn5oJ)IujPrj_lwXG8}CrfoHkU+=Qa*A zuA1Suh%u_~q-13(i3noNOvy&)Uk?!?m8!b}7OJmAkL$Qp)C_dF1LQl_O?bvN=w|*C zK;*n<>_u=P$x(>lS5*}pK~p1-J+eF0@^{OhT|c;HZ9i3OvU7{5u3U`yT(wo&yo3@e-dEe@j<2oh+XMJARt|yLlZ8N7tADcpm@xu$=H?L~vtD zKoR~g%ebZ2%D9Z$zq%_HIj1?Qto)dH_vWkD>1E}rRz$GZOzb3bDP`_Yz2!yT`o{3) zjAgF*MHw+RPY6<|()xR{(h@@xJO7)~T_<`;q42$-y%F?;a!TXpp^p(BMS`fv!ducA zf~NTe!;{B~Ro84~7j(Lm(0sWAj z6Pn03QQyp%)c>Nt^qWQD$y~3VTlRXZ3!BIJ)4C5KkZaCI9~P60g-yPTG!K?xJbkH} z_b8%*B+~n8UdtZO)pas#&j(uH+q{HOx~PU?R=OD6maVX|xD4Z(|N36Fo1|+@o@i^j>hI+d(+RTKxeo(PcoGE)Hr;soM}|jWvb!D zZsy~x7Z-LHk!OIj#L~-D*odVgn1hrMdi_+`+yU6Q0XnP^0o+vHEd@G+#PTtk)TK2> zr{6v*p@KBq^GtApbam5h6tY`b^B?FH3R1mfy)uJ9p(DAFA=o%**LH6AQe>pD^=J~( z{PmwF7t`_58csQ$x02+M0ffxeTSnOONTP=FXA9)`Ns(nzu9ng$xUej_j7g0 zB9xH96EU|Vhxod5haNh|@7Y|zNg(5MbBKA2UP707(NcDnlN(*sL6*SX0NPj z>lv|;e_DUhi9~4L`NdC)#Kt0DV&K$n8WJ6H>N}!%>1;IhC_+A!*fe5<5P?iP$AV3y zjmg;)`ur3{2|f zn-7uE)gDB6=gS9;X<&^!;1zPo6nB-EtMoj7pJ*fIJnelnl~Zn^a_=pFq0Jw0d~%X^ z-IHs`V*KPZCvwl3BPvv2)#*&z+v8u4(+D+eAAbM*d#3BuK{jyRb5W>Etc28WO9P>W z$1GnYpI?|J;rreBnXem%4K^}RBJt*H++|~c+F3JHt}foi9)vg6%_U6Bl^@|!8|-KH}QwJ=}I1T zd7^hsc&!u!`cMPd|9jwneCmPrfAcpVfl9>3E39a8(uFDOrf= zrR|T~gxn7dDKc){OIY0Ha;UWRxQoP7p}nf3NmPpL-8DX*c(UJN#H`T6$Rdb9-_%l( zN8Lo=ndYwIbPq^<| z$ppReBkmzXDn?YxtHo*}F7yo?YHv+KE${D*HZGc)PrmmgzOz!^n)DwUVwsEsMuqJ6 zct)B}<uXwkY?4>zHs} zrQJ#wj9s}nx++XtgBUn8A|94=xi~t=)!e$YdSCP`tg!8oqF;>momqL8(!N2iP60yV z2OWQgT!;KqFEid%{Px>DG>a>4f{KT4Sc*2>~XB0W5hwp^u6B1D2jnujGeu~h9?QH0cVu5{xVoAiB zqN{E0e)Z51SLIIOONIr%e@>tS0)s$?_T1^mw|vtl!WK>*cN^xBI}!aXp(<;Fg%sLi($fe;>jirFE*NR z2;y`lwy%%@ZRE?JO(?`cvH9ozeY~da#Xlc{m?K!pnvw?h{t?ih)hnLHDT{x$^9lL$ zAJOzjT7&of5fR6^pC_>TNXv`@++I9NO>`W@`(*WZWPjSXArct!B z-M`i5(?dtqAV(f1G_9RNewkkS-!BETs3J4++qm~pTARvW1yXv?RDpe|?YeHcR0p7j zSMY}iKJT<)Fnmp<L#djD0;;1zPE&o%yGG*%x$M_}aL7c$H-N*5^V82d|j@>of$DSW1jb2ikx>}Cb2YTtOYI4+A2K8_Zp%1sJ31ylf5t(EwT z`kj{6c9{7#xn4Bnb|CYicVKV z8RBd0b02Oy+(qDLm#8C1{`1~s;dg3^u$y`L@lQ+S%7)j@{N?zt1$zNi2$Ca_Wy1O1 z{kyyKA5?c`Wk#j)+1O^4$cV8c@KMa!uNAO}(X(R}AFFw+?lt3$R9Wik1!Q~ja7)@| zEpz_j4k!*N31Wk-^^`wNL$UK(5Nu>zIN0*Is9=lSiU6H-`2btTCtO6*p?}%Qi*j;< z*s!lmgp>~ns=NN}bE?dHN#u0FiE^J z=z^bJ6!=`F{QdoqMM8Of1>y;ltH=3V2r_x*z*7Q_ylf{{(Uc2LNKU*EKW923K!u0nq1=g{78L?pMR8G0N4u3P 
zaF&Qek=gpH>{`h&2WM*a%D13fNx6?!xD|?pA_7${@nUo+oCu1dgYLAli6r&^dr)w1 zrELL8%tZuL?&ssCay-O_d-u5UDs${&LKfA`1-=(WKPZ$tC=BX zef;Bl-t!F-CY_(3Q)5V|u~-dLs`naewtTE?Z}SlkG~c7nh_n4Gc;(y2*MSPSBHjP? z`0{QI+|5qg-s@|-qF)erklyeX8A!*labLl4S>aZ{(D0A@B2TSNuOYt)$-i-9?2r4k z%osnIH9SLA{<7gZ*Y5QJ#t&;&4>LO*xbTxabqVpy=~Ndhg_G%IO_YBT3`mBT-LX6J zTcMD>ufS=8FTgK^6TpTisa3#icS30*^1k(~VO%3Imggc(ZtubSON9;BHuYgy<4qg1?9xv%_6ERt+(j{g$o>Po)5X=mfUiuRwveqz0=C2 zT%ef$$XMz8JRTPf%WUeFA$ z&)UZ8p@NdM>AD3^&c@1yo^IUf<#8Y!d@J>0UBgbjZgzz%(v)BA#lNFodTG<;saH%o z%ILsTy+rLH+Z}tB5L;OSJX3Dr@pJnnz?g>bHP9*`RB(kC70CnfI!%qEnhyu>4`zJv z{?T7I^TsEblER`Ihh27>lTL3~o|mI?n%+X>EA0RW7V?HXRZ`BRZ50;_;x%5GE;F00 z2=NRTflqyA?9C`%qFk;>*1`u}0Rs8V%6fPnRD-D{T)6VD*_sD!J{#hqxh(!)duJYw z z76&@%80Ri+_7B*3Ftp-#?YT*L&TLjWhuwd7vu3W}szjvI{Ny;2H+n~^cdy@aW#02g z4+ohtK#JSw^=QMxqGd?}K?#i7h-L9hK z83pa?mY;e}kLzq_MRQG8`7h<6I{PLx)$703iS+m6?ngj zXAr!hu~aqan`xP7ZIo&n1DIKUeiP%m&&yamSVRr0buidUpK z=L^>*-!62o#5i#r)n=uu7S6~#ta44|=APht_X4k;^s$^dcu>^qcWI&)XQWb_hM_H^ zxHaEwE#oz2V+$WACvza9&FbexInhQt(OGb|Y{a&Nk$J}^8bVAb$B8wR&@kMpCUGtK z_gsnQ_Kq6*Pewni6gUkz7?p15_zdI)^yirsoKiEj8R`p1hOn=~1Xn|5mS(yqtafa% zxtAucLN%n)zBYKxXp_s&b?HxDmenDPr1aG-BIg7%bJKbM+C}8#3aRO(Wq%UpWelLI z6%YDpmQEH=+qJTP?KL~PFh67LH7=8%?QlkjxlXB;mlGG@@RhTRX4x^*F#chGiRxIP zMW^)Tuk8i;=Is+K*O_g1P-%zz@A%Z7WPC@gd6U~{xY*_0y0#{5v%h?^|D)czz5U%u zn#=1nYL}Ds`7Eswy`K^?$H@r_-WX)Ny9o#Z|n6M#_LzNyC`t(rbdQr zS(lnL?{;*1mrcG=b=%ki2})Jgjpb(zK0E0c$pB^JNP3ZQbFpy!t9tm&ho#TnyKap9d$(~8`jL|a}T7bq@El5xGi;RN#eJRW!Yes*aPOBo3Az=%e45p@1%2cTSMuB#rT9s zR=Dn67SCsvAJo~Db(eXy4rps_Vd)fq7I|@wm$x}inBP5m>=-I0GTsge9ngh}g=$urB&^y8?q?nxFOEu9oZvx5n#*(Rw`n-ME-w}4JLSduy}$ik z1q1yj9fp=W13gc7+7NUcROa;?wwugwVhwx4R8l9W`4@Cw>xZw$pr2BgJ6ffFU5bPL`(K=$Ys4P(rE6rLAN#8e=cn=OdMy} zLhCU9dvU6@(eekU{ocInD!awI8_!HsIW<}bT;HofGjigLaeRpoxc2DALO~SfU9wi* z8_z9{$4UaCE2#hAgIkwoj$qDarl(vB*e^mR=-Mxwm77q%SV2{b`$m6(G!{l)`G#-BQPdi~cAk_q##Bm38?(;cEddGDLwjl^OaPe55MVRe zDbM)VdE&i$McBvOJp$2(D;k~A3VY1-{6<39NSe4FU}9-zZ0Y4N z^G)$ry_QDmu~qhJ4~`$bJrzw&Lbc~8{&C$u>$+|^=1xAQZz^^aQ(9d4F2cL#^kCR% zb8o)I36BTV+xCx1e+S^Ku8{Fx6g=wBNB<_G@snvGL`Ee!SQmxu_8je*1awDoV6 z%p;r?+NFX6asysI+s%-eudu2jpGFbXWcRGzY@@lxaaWE0@VDsZIc@8e0Q2T^_eOXk(7h83C zLp$OIzXBtL?Yi%WyRDsKO_5TmYYsmQtX=cmBCL9m?$g83b!G2a6wa-SnNu~{bCSnr z`~Bgs(D>Hcm&C`#$^7j-ZCq%s#z>hZ{OdPtT=RA96|u2eT2IBo9a9S)xWp%p?`)jv zaPDV6LDY0nDIVgTT{nydT*@Ft9hCj^2W|8sLPF}Wyb2FTbNKZ>CzYb+mFaBF?RvWn z4Jo`Yo$V*%U(VL&6;(Y(-wrPK;ll!J3F5ehhMQD!?Cv*?aQU?HG;ItUP5oIF2{D)( zy7u3HyeQ>He7dk^^PQI#G$PVBUQU@Y5n69mYLEK|&oL!a9<%VDG7f6YRJtkoFs#;x zF^0MBBFprFn1B9=UciQ+s)SxynBKq<6YBZ;$pt<2;m@DZ*$#Ju17|w#qXCaXjD^lr zt$qP)`^%4CTwGiUJ}X1#e19bEN%2Szsw-`g*ty5q${XBF@M1R8)y^y(c#RG-F*H{~ znK0W#AV&;y zW-7F{ad!GmLvISPjgv-cGLbRo_fSF9Ic`7?2`D(wD>&(1`MpgmSqgT;*aXl7PYQX^ zy9&oobdumjU8Ru&qs++YEgBlpC~0Wu1AQLeSYR1CfQV1%1xD8+p<`t9fC~A%rkNSn z8~KR+rU{WMyXKrs69h9=GB8}}su7>$1k3C^p8`+}Ux5-hk=YCp5Q?lJ91^G8q)1u} zQ1KST+wYnk{90>`TbOh?y6ln`RVF#kj$e2tYWwx4$~!1wAVu^5Rt9EoYO&}_vam?* zaV+hhM%yoBDs4?ozlV4Hy)F}@2U>r!R@Q)vww4x5g_P7dB*p87zgt@?DC@3g2y~bC zE6p54X$X3dlW0sU7Tz1CzzE4;veYHB=iZ)PUQ)!hBMU!YL8}O19bYoy%FC(l<7Si#Z^9qW7^yxzELia){wNqsMc$8Ps1cJ&NnzEs>6EW_* zfLVlHS(W3-xxqiLxzBn3`8CEjfp*4e_gnGt+hu)8r1wfskuLzo6gk~E;Pjx|Hy6Fd zM8E^yC?BmcUSPwZD~!ViT~k2QscSlasY#Dl*E0^rv`070=g+5{PhGkMO;a9eLG|>h zJ%#s!OIg=Mes(tD4^9p&G{c`uxjo+HMllJATD;me1`Y{>KfW6u0Ggs@SB5s-U6A@b zOqz*eK1275=KswSj<1Po7tQLO&MHKFR=thDqH_uuHTv`v(RS@}#s>?N#3(yYG62&LvnQy3t z_)o6AC^8O(h7+9X@HtK{Kw`8AQ^v^9PxeArl*RhJcTGL$)wz#IrtC}=h%ZLcgBi_cp9nPXSO z=N$Vu6e5MIYj{kaI3;#X@L#*#YJaOale(_*%c&hfih-{NZ#uAXg|1eUWQQ1%1Rb0Bn;V^f1dPD1J9?D(J`}h 
z_|Ty{t@hL0rIMvp#n0|r5j7UBrU}5w&vwI!)C>Ak(xx5?J9>EsuN*48OVEUb#+>TG zw)pD8ubibDx65uDr+K(>`!p>Gz3!Einwce9Ie1f%QVIA#Rt`jQQZpSEx70aAZ<8#B zjY78@*`Nn^o^{07n`BaAbLNa-!t2CAn2Dm1i;@PbBWT9Bj(SPVeStUy;>TqO`BVc> zMxj5NM12E+3Fib=+26WwrBl*%I=VcCk$E|fVK=DPGu%3vaQAKt`joQAh&@O`Tts%daAKn6ZK!@G+uU`uY2(TzT zPC3FGd$J(mA3b$$Zf-l}(9-^Wlja$AFJHe-zaUP)@wj5ZCFF%JTq2P3LiGgEQP7yu z3{UooT}j{1V)>7}q!y%I4@NF=XAt<{f&kS2V5_d@8ULl>M?wHT9 z)aFrziC+5>$z(0x=E()L)tx`G;#=M0Sdg7v!v4l z{Ei)M1B$Mg7eoJU} zN2nUmAmy`|#)=^uF21bSSEtA^)49D3O5Cbk9?*+F`jv_<8Et14cZ$Pa}e))JZi#2eXDo8B5mZd zQoPm?pNEGZeIYdr7IObi*(i64&H95`=Ay=^vneXRnDJezguheegC}eR=s^q>y)=n2 zy^|uMqOS{Vfpp-mcVLO|#81I)-Clt{(_L3cmvcvBK6mWAP`~5Tppz`_aOe)UJ_3=G z69`Ze?KsHb*Me{0Do=dgow7W{3l0V zkc`jCw;L{2m=}^7oHT)_cWpMATUw_n5NIs*_;^BB_xt;^MHK%RLvry`SlXn}irL(c zoUH%GKh=`ehV* zA8YyNPmiQl(qfK)Vx5521_2D4I|=ILJOz;>l3ak3leJDGro(IuqM-xOtU$ql9e^M{ zxGZ5Ml$4afG4119_Y150)N>kbze1l1DOyNbzR@JCW3c;dBGATw`VPfK5J0Fbb(pvSA?=UT3h0{rltk7xz{)E;*H(08Ac-*-{Ey)(85Fh zlP+E|ICGd|Jpx!>cJ{mSi>D!~K36`PkmOP!2rju{DsO}Tjf;H!rF~8f<-ZrFmuFvW zx`kI}q|qdPa`>zCbJWT9 z3x@ZYkaa&k04&4LmIG~2WVzt!E8VB1E_?>3DYvjt3vJgTB1iEKfhX&N82ww$T(PC- z(_Ic~v2_d2+owx<9RF*K_d~|iaU}t?es%R&{xf**z!`k{z-r8Z1I6FIHeUdg>Ev}- z>C201tue}51T?S*3U2Zlagzd!{Eo;fur=@LD@smsJ|3li z6U&gOy^7I1M9BM-Ohf%#0>Z$XH*b7##7A{K@|Uh^=nwt*m$jQh zPPg8%6XU>_`~df*dD>lHef!3GSUUx8+Od(y&7C_s2pJ_(R*D>?C-_06@{Mm9aKA)} zGXPfTA|1m3LLKJo>&rCuc=P3JoHn<={rE*EKaS~)Z@`>^>(e7VPDaO5{BjU8T2f`v zG@^I_`cln64xZs!X3knNWV&2Ux!R)G{LaJN)iHYz}REWfGYEq&`61Yyh3ek;2UA7&?X!l$6pGkffy>*y#%w>0o?C<}>luZ3xy1|xJQ zUD(3GABdTOFM3{&nT5qOgaNoZL5H5YGO`ETeM2BMGka6e?(>w}(yaPS0!wv)`sNeE2CflkDrPFM+ZiC(Wq-|96}?K@a4M)__4~2IeB?E z`5WQM`ph!q1z&SjWV)A~h(55RoTLE_Xl0S)+i4I9>3vk;^v{f+d@wxeEg z)m|{r0=C5g=5c=n)$|k%31OPiFp?s5VPeeS!Ub$QLZiT#`kJ|GVVPFOj80%yvjB;j9Q+mP=V>Zf@9tGdy49^NL={ zGY-`f`{`m9MH%RVt=A7B^5Jmlhx!lFa9U#mjdF2%kME&`)X>mC_#-EP`gav5a__7U zlM9e*Qo~#aF7;m6&%GfYCm?t|#e$|Z9F9YG`zr~r3E&wy?olk9|2^)IXEZadUX=cz zyluHFd0@|;(-3LH0FexYF!cETblRU}Hk z20+t5qZKpJzLArda<8nluDQORfNIQt&$F;}kZEAbc64Y1e&Gv%&lKsblUjo*8~0zp zTEme;gi1Xxl{|jm3t5cp?~drgs%5O%3zGk?*+B-tIuNDLoI&hTI68vZT@_@m!%QdI z8Z5`E+}*2i@XT}ps1L-${%Kggp-yKlhNWM9@X1pgd`MB^;%wqs&ofr?j3hTD>2LM3 z%J9fsv%g24e_4Ij-FYulWd0dzZaXXUL$1dG@^V{7)B_o|hVK6ybeUy? z`YNB9t`;ohpT^c2>l?3!zy>2@a6yVM39*H%ecKSEP@J}bkfVSG`mcuP4#cs-R?k+H420sj zzt~-PH{ph*;Pbry)FY>lRliW7#KXDG)h*2tYy%IF}C;WIK6H*&Cc zhM5Y&`fpE?>z2@K^}~>@H8)pGxS55O&UfzP+gmedn%dSoIA{e#!*VS+N-4*D%*^zZ zGdeI&bfVa*=*xo)949zUQkmK`Um+%un)uJ*`{p%YAu$Ip2nINL=4nO0o65rpq9rII z2|IkUte>x?RRpqEgd0rBIWoZqohg8xDWLWfPM3ygPE5vueFWc z+-b}C3bii`n<#gqEorN(4kJM?A!bJCrRM^6yB`<&il_?W>ftp+h;m4aOLasO>lvR#f?LFEo+C?IFtjC51(81z9DwD3GA3PJOF z`fxR!m@1uh8UcV2Ks0FaJU8I8G=MRfMWqQI0OY(ln$(K5HxI0Wo|EvvZ7_0%pYv?D zt2OE@8gOab>+0g*a0Im^h{rG|TJ4$WXjrU@;JmD-mmcTn>4kfG`|Zyit*nf|+$g;7fm-nVFfGAo*JoT=8B`h$`ZV zm=9p<9(H~|kKpS=dUlR1KcE^odj(V9{2tw+n-X<+nq5(?w-}v zS*R!oe0i}={l>lm{`r_INcP3~lJg+GN6E^;;rJnT`oLO@4K77GJN14Uqq^sUV*=PQ z@PrK4bM)}=aCB6En~EoKLjS{~+dS2oA_?6WM&lDkw?B;2%GyFc2w)}9QTI6+Ei!0p zYK|VW#rh;t)icA2RJ{n|PciUfLpaPX{_+MttqMh|A5G!MKwwR{T-g<)E{TvAFg;4)a{dGTW< z9LJChV9Q52;RCerm_vgzsn~KA6$7U>>Z#)*u4=Z9i6nRfbVyb(0YkGE%f1x?7K^0n zqnsQyE(bfrv|q;B)rap2l8YOM-4{a-TkUVAJ7mVe=BOm#pe9l&%%nMkh<3ui99umm zkKiScRzhnNHB=n)?;nk440(Z5`iW}aXMb>o{$~3p`@^-l1qDa_{#32J>+99n3@gf+9Aov9+emi#XVCk7I0hnnmcxCd9qJwwa-{l(GP1h9e{dJ}P z`F7qnar)mE=t41ch1w_C#q4Qy5HEN~=z_4C)h{<;jmh!yX3@|L$O7F(hXE$FBe_^1GUa(!Hu06OoYF5>&z0Q*f?Bk`EKLn?2taN zH}f0LgCQ?Y9!i2CXc=%~6LI&V%^#VdNYfPW8E9c+GYl@>1yBW^6|4O|OU;~r5`!Q? 
znjfD|^>GKS2YcgshJ|PZO4L|)jP!SR8)SFxS>^o<76`uN5KHK|>ZYcd6PR~5xGBsl z`b@0;upDh^Ig+du{t7!iURiN>Qc~V>JMpuuP3(MIl5OdmAMFf}<-?T__LHw~dvgpN z7t@X`7n_%q{mGrY*yc$w5!|hvr!6!1(8SD7S5^g$Mdc1>&h&wqiigH2eV;`|oQA;Y z%U#ujvIh=q*%$(%*JG4>pi#y@864ZW`?xp{W)vcUd*#A+tzeXiWKpa3*5Ffer9kI}en1ZqB(n=k*K;m}SutM!I^-=^Zk# zkF>C`D5t!eb|2F^Nqbv*iy<-|;-?i%8*xQIf((JoG7$#1*}&Aa52*=GIqmIfJCybn z_f27v;Ib?@Bm`3s8O5rvALjGN5hocQ*^6H^prXg3(#ypJKr&ZQAhI+im)YIWaNNpj z5Oy%wx8jcT+pep`Tmbq3aCFa0hJQC`c8OY6{Txg6!wIMFj)8@R&x?S3rJXVAnNrfy zfwgJ<(=Nz5^73rLIH|k{#~d9F25+xqPM0A0GAk&nJU3>)n(aFFr01`Q@gJtai(_;& zSCzxr20BgH+kNBs_0oSAxw8o;EqU64j@+Xl8^$M%}R2x2{J z4ap<|eiM}DaKu=X`#<1g)Z~26oLPKL{;f-+JRU!*tx;-0-BTAD`H!8nOvKN#_n#sI-UCE~I1=gNJ~$aR?0kruoz zzvVxBN5CpiR$Jg^hlDfo8y>?H^TF4+r2`;XLNh5sWo+wKyk zAI@pDwY80nYSHyJW&L*apbrna@IUYKN9?!nmHlT^bpP*^D#guAZ*bcMV~D7aG{o$t zMO=fzPbw-R0$&do!x#iOtOvEMuU>Smkx#u`;yeU1O;{XuZ#rFKs@Wx>2@ub3J9WX; ze_|W&dy6e8m-Go|5$iJJ%xxZ-Ah@_pF2yc?dElnjjgS_5ZDCNk@YWr1&edeCG5ERT zRx0V-^K#q(2mE1F=#P85M;Hxwy2nHxuY^q4>_F z?hg;gar8%ZHuPojDdD2Fq{I7?wf>p3@2lL~J6?4Bs)@v1f}oKidja0|NKD(88n}Mb z6;JGL8%C-9iSIs1QQbwcX2*vzjjdpTKbv{n;~1&5(^Y23d4RQM!)Ao%_GoE6$t!M= z!H43ps!bl93es}~Db%v4cfw7bcq;)s>{uZ&d8s2*N#U)>XnfV>h0aqxuM86cM$h&v zI0>IEm`m_hPw>+exS3!3wd_U+KJK#qK*cVJRn9ofOo3#rvv<((;F_v$VdZxysdd8b zJthy!THL6HzsmMwSQo_NKm@(KT0;=5<`P^$H~bycrmSp%_5#z92)1rl&?NuIzvKV> zjU0h4KZ4+(p>U9YS~JpFlYjd2{r@lce;kL|b4}YkXu~(}-9vt(nzEKsp@Mb5{{Zkq B9%TRk literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt new file mode 100644 index 00000000000..5988cbca843 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt @@ -0,0 +1,17 @@ +torch==1.12.1 +torchvision==0.13.1 +torchmetrics +pydicom +scikit-learn +tqdm +pandas +requests==2.26.0 +openvino-dev[onnx]==2021.4.2 +onnxruntime==1.8.1 +numpy==1.19.2 +matplotlib==3.5.2 +termcolor==1.1.0 +dahuffman +wget +tqdm +pytest \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py new file mode 100644 index 00000000000..7764babd8c1 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py @@ -0,0 +1,5 @@ +from setuptools import setup, find_packages + +setup(name='compression', + version='1.0', + packages=find_packages()) \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/export.py new file mode 100644 index 00000000000..89ecd955a63 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/export.py @@ -0,0 +1,33 @@ +from utils.exporter import Exporter +import argparse +from utils.get_config import get_config + +def export(args): + export_config = get_config(action='export', gnn=args.gnn) + exporter = Exporter(export_config, args.gnn) + + if args.onnx: + exporter.export_model_onnx() + if args.ir: + exporter.export_model_ir() + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument("--onnx", + required=False, + help="Set to True, if you wish to export onnx model", + default=False, + action='store_true') + parser.add_argument("--ir", + required=False, + help="Set to True, if you wish to export IR", + default=False, + action='store_true') + parser.add_argument('-g', '--gnn', type=bool, + required=True, default=True, help='using gnn or not?') + + 
+    custom_args = parser.parse_args()
+
+    export(custom_args)
+
\ No newline at end of file
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py
new file mode 100644
index 00000000000..241558c308b
--- /dev/null
+++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py
@@ -0,0 +1,40 @@
+import argparse
+from utils.inference_utils import inference_model
+
+def main(args):
+    config = {
+        'model_file': args.model_file,
+        'data': args.data,
+        'gpu': args.gpu,
+        'backbone': args.backbone,
+        'split_npz': args.split_npz,
+        'max_samples': args.max_samples,
+        'gnn': args.gnn,
+        'checkpoint': args.checkpoint
+    }
+    inference_model(config, run_type=args.run_type)
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser("""
+    Inference script.
+    Provide a dataset folder (--data) of chest X-ray images and the corresponding split file (--split_npz).
+    A trained model file (--model_file), produced by the train.py script, and a checkpoint (--checkpoint) for the exported ONNX/IR models are also required.
+    At most --max_samples images are processed.
+    """)
+    parser.add_argument('--data', type=str, required=True,
+                        help='Folder path of dataset')
+    parser.add_argument('--split_npz', type=str, required=True,
+                        help='Path to the split.npz file (test split)')
+    parser.add_argument('--gpu', help='Want GPU ?', type=str)
+    parser.add_argument('--model_file', type=str,
+                        required=True, help='Name of model and log files')
+    parser.add_argument('--max_samples', type=int, help='maximum number of samples to run inference on', default=10)
+    parser.add_argument('--gnn', action='store_true', help='use the GNN variant of the model')
+    parser.add_argument('--backbone', type=str, required=False, default='resnet', help='backbone model (resnet/densenet/xception)')
+    parser.add_argument('--checkpoint', type=str, required=True, help='checkpoint used for the exported ONNX and IR models')
+    parser.add_argument('--run_type', type=str, required=True, help='run type passed to inference_model (e.g. pytorch, onnx or ir)')
+    arguments = parser.parse_args()
+
+    main(arguments)
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py
new file mode 100644
index 00000000000..cfa154e4784
--- /dev/null
+++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py
@@ -0,0 +1,46 @@
+from utils.train_utils import train_model
+import argparse
+def main(args):
+    config = {
+        'data': args.data,
+        'split': args.split,
+        'batch_size': args.batch_size,
+        'epochs': args.epochs,
+        'gpu': args.gpu,
+        'model_file_name': args.model_file_name,
+        'gnn': args.gnn,
+        'backbone': args.backbone,
+    }
+    train_model(config)
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser(
+        """
+        Training script.
+        Provide the folder paths which contain the dataset and the split.npz file.
+        Provide a filename prefix to help differentiate model/log files from different runs with different configurations.
+        (Optionally) You may choose to provide the batch size (--batch_size) and the number of epochs (--epochs).
+        (Optionally) You may choose to use a GPU (--gpu).
+        (Optionally) You may choose to use the GNN (--gnn).
+        (Optionally) You may choose resnet/densenet/xception as the backbone (--backbone). Default is resnet.
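+        Example (illustrative values only, all paths and the run prefix are placeholders):
+            python train.py --data <data_dir> --split <split_dir> --model_file_name <run_prefix> --gpu --gnn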
+        """
+    )
+    parser.add_argument('--data', type=str, required=True,
+                        help='Folder path of dataset')
+    parser.add_argument('--split', type=str, required=True,
+                        help='Folder path containing the split.npz file')
+    parser.add_argument('-b', '--batch_size', type=int,
+                        required=False, default=32, help='Batch size')
+    parser.add_argument('-e', '--epochs', type=int, required=False,
+                        default=15, help='Max number of epochs to run')
+    parser.add_argument('--gpu', action='store_true', help='Train on GPU')
+    parser.add_argument('--model_file_name', type=str,
+                        required=True, help='Name of model and log files')
+    parser.add_argument('--gnn', action='store_true', help='use the GNN variant of the model')
+    parser.add_argument('--backbone', type=str, required=False, default='resnet', help='backbone model (resnet/densenet/xception)')
+    arguments = parser.parse_args()
+
+    main(arguments)
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc
new file mode 100644
[GIT binary patch: literal 1528, binary data omitted]
ztSEGoM*yJLc$)pnH3+D@`kq;E5b5}c7s*tHRNcjWc!iLr$6)@Bd6yHN~6Ui+krbc3c zs)BKZNQTF=U_B+DP7);xK2(uVU6^OC0$^m!G5O%t@eG|BTs|&EU^0NGidaWXMx%46 z902C4Sh{uz3aIS?s4upF+@v1@YJ9Rz`_uuBWyX0!Y4F!{>)lAg?Rz2F4?V0@CSQ3K zaC}rIspP!sa{hD@rS*)u21eBTND#pnJj3d!5b6rc5w-&F&9?&L-|4QI(XeGMqP|8` l)A3nh&L}Bj^BR~{?^0U%pVtn3fz_qJy}so`gnY zTRVcR>5A4M=oM9G+S~C|IBR-G-Vatx`}e%Y%9Z;H2V=PJ90Ij_0jE$sTMzb&_w$E)EXtX!+t(((R7=k{MZfqx$PV_9bTq%vH{MOny1 zEFbK>c$YhwRtAHhF;rHD%@TcSe37d%xlRpNX;L&4 zkK-&a(l|D3D)Z7XIZF|33&U*xz&Bnp);Y!wb}|RsDPmh4b(EC4aShE*jknE3`!iPg z1E?ds_3ux^N=ubY(l8OZzDcgaEETCrbSlD1jl;Rliz=L?diR?rvm4X>9WwiJo*?w- z5%D$y%IT2wNO!|}hhJEaf3?x04X4Z-{@8g`4Y44gBZbcz0!LX9bzs3Jnz>qVjGW=m Tuceq5=@~v!s5}G?oDAqsm3hiJ literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/inference_utils.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/inference_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2d1871906409a1359607ff3f654c7d418f05923 GIT binary patch literal 5082 zcmai2TXQ2v6`r0MNi!o^k|o)eyf^Pc2 zds^Q0re}*PhdfZVMc|Pa^q&Rg3ZC@u5ZvHwWb})Y#inU6BewcBBa1mI#pQlQ z*DchYzN_nO9yj_8U9XIq@p6Az*PT%-Zui^zJ!{l~4@cYG z(Mr79U)6SRv=(>!U0r9RUcBC4*L|v^Gx6E}*}45(|D2BYeE&SJ@ud%p{sq3u>%8#+ z>u>NTUxwu(Z}B$DOT5EZP=0~4YsTi`)hk7+1jtjjQ! z11bC@B}cH7U;qBqu^@MA{%|P5p^tG!#c?u5~BPo@DyvG}99iRUVw^b&+J_6<>0 z#*7^sGZT+>Y+Qr&QT=1KvuWo`BD_6~M3@eS!smIJhtd!Bax0ij^HMU6Cx;Y_43l&$ zJ~l)Pu0MZa8xs=#Fx>WeBoF;t+q)r_1rhS?R0P{o8I98Ih*lxQk_@!IH95>3IUZ=A zZ3>g(y~IQZMc_g#{w|_Y$D`-+8hUY-xp=&xkyY{3Fbm@tr%thXY0gWF8&f9EDyz^X zquKuUPt1^(vX9uS#xMT(o|!FWbyeqP&&V1mEtE~wnAxg)%wTEGYfWXS#u3Yw6;sP7 zTgp_eBZl|eSw|Hk{Xw$*l4{eregHdSTajhXRC~X|S8iF`#c4Et zZ5DDDBddyX;KNhRMcXdoD*W;C1N)dU^!qs;+1~Fc%5n!WR<%}}Yc<)Pt*9y@fE~3O zYBjaQZTMeOu39={Wb1NO)l~J!e9z9#WM{K;*?II`Q|Hn5g6wj4)>G^1!m)WoQD4Y5 za1Pez=b$LfxvqETBPK55Bor+h$Z2m!P@3dc6c2NAD1{9Tr`zIDj7(s6b1&q>a1bRt zyqmka?n^0hMmgZ!^kuL&NTYk<$3O-DZj|2plDF|L5|T3e#f^>kU)*|rH~ZSg#w!QI zt&LZ1TzzqF+B}on;ekKOOGq0eS0x!2+2)4G2Aw#$DaS}UU*;u%z%a~9>7G9c^Rhpg zgbB|bLZ878c6M@inxwayIEwH`z&eSLSvkLm|%4OxbIBC5?vhIO3uKgJ_b~ zOeT31D1_0%1W)d2;F*lmP?V^H9fy9BmnI@gWM04F)6sFcfL-4wGbV^)PV&7vW~^27-c%#t8wu(d*h@xvW#~@^yV$o1gnrAK z2rMDNtybBsyu64AErZ=C!RFX{Y{x$s7Ew4UK(81+orIVg0;%}HqB31n&|k+Ihf+j= zM#w@C$RVx;40fTWdl0s1GkP4;Va3lgn_BZsk2$u(YOvSQr^BwWmf2!u=0QWNMOxGL zO!L#T59?p8GyR6;pqIy*mTeV2;RXH0H4GLI$}avWB0^e?tk;nY3fKy~F>jf|i^vb--HHV15V*9!MiAzReiP$#9lr&@l~krw^WPw_&QcW``J9RlUq3dxqV$s!|O=F z$zhTA&LUxQH=T?kIY=h=`kv+qID zx*LxmZ$?ROU48q;rU}do<(;v(l~;?zA0&PpZni}kQx+Q#lz%=KFOmKY(i^0onuB*nDSOHJ=Xqv(<$rZ_OHJdJxCbYJ z1+n-egm>_0P2bno5WV}^=8~gjuIBZihjhL>|~8YeM|Q!xtQ0{A9+HFVjS&**(H$vRkBGkFJ= z7H0Da<^Y5uq}#8`8ns5mG>zF3H#Dv^C6EffS`u7MH8Jm|YJtm9*_^dF_|vS7`89w+ z#9lk-+ej`g-U9ny;cdhNd&GF>fkwNm`9HihwEwvmAS*m#ZQ61b?5h~topp;*X1zIU z^k8X{{}W3^wHG?pta877zj40_Mn}8)Irt*G{nlgr(Y>8 z(!9CmA@;Vy_lE4Kd8m0O`%_UhFvyNPgD-=t8RSZ~nyqDB^gXMYZ%>}%YqRsJqk08{ z?B5*}d?GDa(z`gQfbhvw+@P8rOu3)iL?6V@V2FDEe)#Y+`mO@2U6~9H zMuUjo)pYW;Oyh7r8Bu?m}_z~2;Hx?%|b#XN_d^Xl zk7QR%YVp{d{aIxov(3zh=C96w`XmSJSmGlL{21$_O{S}k#$Ezq;&qB4!%fL>tED>b zbCeIFhUQIELls+#<+qVM9FyT(Oko17$sprBUZJagNVq+ z)`^Ey{Duax<0uUZ!m>}{MI`879-5s$T4=pgr_PmL<~Dh7Uc$$HItpK=%WFzgc8nH2 WzI0GXKK|23%JNFi4dte!XQlA3zXI;-vGSVS?bt4+g;y3~45ENG(aab%|EuvQb((nP4OgRZK7x zOJ+%nD6Nbkrx9wp6`mu~45I8j#JJ+%V?eNkmShO6gr%x!hTKFN$r8dyy5f0jr3}qT ziwUi)grEtn7sQk~eG=@vV9n7@vhuu>GT7_}$`#Emr0^%k7j1kRNe}`dkPf0qQZ3m; zkc5L|$ZQE0BEV`GvaW#@3nhcaM(2WwL)iMVqR^_0oy##5N z+@pItB(7RTKMfk~p#SuXy4z1XCB^`+gAVHa7y|m=EzQ*9)!Vmf 
zKYU^Rzx@wR3`ThCTJ?cV(Q1UBCr2I`m8L@YA8UhGO{{;U!91IuW2+m4@ETM60DU`+ z@S>TI!cHyfjqu=`OXNQv9**!SRY3E<@&gbq4NVwV77~W=(cjEF@0p%!@DG<#8}?fg z0P~K)S6owT(ni$kv_IrgIP-@^=M_g z1~=dC8l92_uBNte*G4RB)!--J{-9g)7%YivD!PvC2};P_zCFgif~RhgFZR^hAmE!u z>iy>ze$(JXvlsO3xY?w^S#dGv;xK@>ugI-gSnx>00ka55qVg}}zg*T*gz)CyFAGiA z--EhtgboRh=!S4uo6&mEAC&xZ#hK=(Gr&?))l@ht65(6U`?6`b(h;7oyu57ddNaaZ zRkI8QnL`nNBkh-LyfO{p8(r=V$T}B-c$#(CdL!zOJzt#sTQ;viqoNi7w^}Gp6)ueAe@OuWBxW*|@@6>ihxFfLz z?;NKbeby@);VBu42*<;z z2+va9%Z@(~hj3k_AA5H$xU7?BQkNc%0+L+L>a~?03H(WJu+MHVKn+g{F82HsFePb& zo6AOig7BfL#=uiCA0kZNG>_w7g^NwdIGuX&>m(n9pY?fpY4l+^!e=Yex2-q^%E`@& zVb24)AiUI@GwHTN^2-hszStA%hj7Hr?n!m8p<{+QS87M^fX9R1Mvd<+Q^Ic4g+FKy z2k-`T$ET$OF!>u!-`jNo0;WYJWwaE7>xuES&*XV3)GBc^VR(_b`%yF0UUOT@#AcaIHZ~C9iA|=Afh)&DbAL>~vNg0HYBASrIf;9I3gxW++py@VB?$BP z&&9uD;K=MXh3u*sIvU~kHrHQOiA->l4ccP-{L6LzM}Z06@ykaVyj)DUWxLiqfP@Uw zJE|6*CP2xnSnX9ngR(K9NA27c;N8MIPtt4GLh|GH^*Me$4C<;~-|Oxe5xk>)QEBaK zFNFJEe-@Vh8ko>9YW3pwN?-!>bXAXBRS$#*eKqAitp)IsE@_h&90X-`Tknh^p&_VM zeBHuMJiSqr%I42UoDAkT@(L5OJ9H8B>L>jMCJbH`d(Ii;2R}+j*cKQZb>d+h!hnNO zif~*1eDC{F(10O7t+@02%@E*4`^Q54TG;%>jcq7J6@~ERVfV+DRl(t4E@Yd&`VTZ9 z7vJCQ!$ycK*l}^+sggxdoY8z@)>;;nr8AGs54M8R(zI@T1t{qc!ZO#l*N3X>3{>h?quG9 za(MGiknIYQCu4-q$X0b!6hjkUT}xdur?M2zMA32pmjNdOODl_)z?opz{TN+Q_XR9@ zc=PU>*L{HnwmyCOY~BbbNq#IX>RB?vs$ZtZG}Z=)P3T@wx+&`#q&jnqZ@@oK!+s8b z+BD+MUQkY4sCwLx2<2=koUr1v4zN5KrrE!HC-A-TdMlCkHCQ%&KTz6#3n*Q}7445% zC~zaWZ(qtru=LqEcL`wt%4GI@v8J*DEQJ?dpV{9NIu>-^KHnVC7ddH*)xxStr#Htk zc7tHDTGakdDQ~y2948(Vr(CBqSxkaX=ag7%PN|;b^m$IHVC4*%PKA+!Tis&MvE?~w zA8eU91{2^RYZ3JE_$L`;sD7zl+$l!9@M(drmi}?!!v#*IaUxe+-@;JKTmMrA{}`t~a|qu9Ub?aWOF}7X`xU>)AsCYc}h2ol*%C zA!0r*)_H-#i!UTO5kv+gxNK@Kr`$!m7>k7q5|OT&r7rq$zF=_5Cc83m!8^rv73`{2 W9`DX|jCc+5A|sT9oRpJ6^#1@OI@{L( literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/metric.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/metric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd8177854afa9f6294782c95fc76b128faf7893e GIT binary patch literal 1029 zcmZXTOK%e~5XWt=ce73N==&iqz2=g@y(*PJJw`}yS_ws~wG%@2g}o6(@}3G8_!8|6 zzLHN!aOqdz#NXRcwba&(e=}pxIG#<>?Y0TV;Op;X$0y{MGwu%t;~b(-pac;#B^iw< zb<9#0dBPL?Gl{rhugSo_!7ehOwyTO{5-*d9P6{Q*iA(cu8~_5y3yA(s37L_aK|BbD z_=a4OYf=a6wN=q8W?F(S$Th9QJIjZ)ArLX)DG?sljZ;#2@C5I<@Q%qPdr97YtJ@~P zWyu$2+EaE+>P_UjZ+8rRV0W7eUEwX5={S9kx&kMW)t&FHOJxN$aaPpJCDO;!-Ntd>h1NDRW`{SgCk5IIz4_Qdl;tY%5NZNva*-i*XQYnd>!` zvhhWs+mG)_ttm;h<#X}rX``Q64~}TF%F0QpZui|8E)uxNtcuv1SCDbUF0fvVt%u?f zcl9jSs0GI|`9ahnF8yaX(p912nH*vd?Tq*p6NwcxJ{ylYsfDKQxLM&5x=Gvy+U$8?$6!{Bg C&G}yd literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10b7829be6565390ea49ef5be43debf96b392e2d GIT binary patch literal 3135 zcmaJ@&5s;M74NFe?@>CIU7{aEO!xf4~ekOGuIU6A%Z)pHRvTA>>oSg#)s*{Hl6py;&gL>Q}E`y;rYZ z)vMp@aidXn@O<)}zbAiPah$)I&eg|7=WTrXk3pEj$j}K1S~{d7H*`rM^Fq&#`JvBT z=6&ddCDvpmR{oHLWfrgsBmt|k8rll0vj*BKBR_Gvt>>86+3OO$l8pCx77gObB#!cA zEO-3**y5AG+}2&3wp93UV`o*~)YHH*&SmI0zIq!GA=j z6f;*aP8CtKM-isKCrmKUn}!(f8PP85kGfUui#SWUE+KB7F-^0H_G2O9GwsW%3_505y1%C+Acj$_&NHF>B#&M;@i&?uHtXmX&* z;F%+C!uP*l-_9|0obv6M_2pSS*zR%0MJzen&PB3)B>TgBd(_X9yYPvF8BLBPk1l63 zXOZ6q@rXy7cmqPoI$rvsTB?#Z4M>$5X+XuMaoBn3FylPm-_9q)zPy`Fo+c*2md_TL 
z%QOCzI*xP>u!H7&v8y;c=5`!&s?0;{qxI32K5!LbC7knZ>CC7?+6{0ftc!D*%J>3R0l>6A!tDoNy~5Y7@b!Z4c||*2Q)>sXvopOZt7`3>{FF}Dr#IBP zx?%1q@Z=`G4e3D>5Y}*c_|7oYZXVQzG{B`RhX087lYf3nX7K;(R315+Snv{Swrg<= zdmJ_{5iLSjlhd%VRJEvS(Xt>Lu+*DNB*g0DglEawVl&Nx^AM!z0`x2#26UZl(ZH{I zRdV%(hJi-9>VAXlxI1*myHDw7!CDDg=6j3OX`KXh?_VBSC3heJz`{n{!}MQ3OS7qF zPi;lqMNvV9qB1S&lermg!9(sD`7Kki{Gy7@%C>F+Q)ezJl`Df;1-FctdyQ(0%eY$)6hi*qZ}G7!!89UzrsP*nk$3m@NhTxZug{4P8< zjSl4SXx35K&xz^J>8vdMgVvlF9q4zV(O&4=wF2$2I%pxPIW4rVtzzrtDy;Rjm7vmN zWy={B>-xoES9z$=rJ_P>a`*WK9=(Qw8$F3-vd{AfRW})MCd@7CuGniL%zqQAc5bp* zv`xp&k48n7mvAxivCveGwU>-hL<7e8#B$z%pXInlrUKe~rQIRV%-_JV_WPOq)_v`c zj)vVplM`J^Mo*#>sY{F>_Y)qvCo&KHlPEq;wTp?gKa9^f%6tEMuD0T1RCJX86W&kv zWu9V%wq{q6UQNQ+ZlW~DjM8j> zr~DL&^LwKFbJrvYHk0mkA+*ou57J-l{^f&D9)UUg<>Pn$@nes1na z6Tc(gg-68?(Mb%_Egv&#j?{+r8W@Cx44(Q7!E7Si5vHqXia!VCgL(+Ctjab(n+xLm zFv3asd@@e*qdY*&_G j{ttx(yI^-TW=BK*mib-HS3v>?z+~U*&EVbOm3rwvIQalK literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-39.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d8eca916b279672480d8e5a5d78ab7e0259d05d GIT binary patch literal 3138 zcmaJ@&5I+)74NFzAZ2NU-acXQ$?S9v`u_KtQHnG#8-u92j4 zMrMRKicX1gjHE+%93wf}JzBL|=${n&(8V0y)Q7yJAU8c2tvtj0My}=>hk>HT`A_MD zV&n??sUnJYDBSe-g>mLZ!&HWPPPEIqgH~1hBF<8-OYoa#Ow(+n{aA?jO#5;uk^^1s zh~Xg0#)HwBu1IL-qhZdq*B@r7u8gz%Xw3OjuKiAb7|WKg$$jl{hA}Iqj$$c=CWo5z zo;zX_bAS20_Q7z#+cE3Pv$*|Wn2ZNJlX?5yu6#6p((Z7^MJzdM=OSs3Ww)QV2i-i` z!vb(hgV9*>=wfJdF!?Sjhj{oK_ya=8I@zREQl-lzKnoca+tB%Pr=7#CIK}iC=lMZ9 zAN9L(FB?5gOsp-wYalL;`*Z3z(mBLtnxn=B;~1H{KC$x z^jMiyOmIFUAH6YYVxA`ES$;*nJXwK!1@cwtt7Wx%PF0|qYNbPO!*BQj*}{)Q__X6F zuV}A3s{X1btHQfgQ!l*xFN?|*3+xdrQ2$?x>i@L(x;biBEQ(cq-5g5`i=#gu{a!h& z)}zme__qosYuS&%FM+3`q3Yltfv;QsL&QZHB~TS0PV-a10qE)_x^{_PD(H?^w39Wp zb_hE=(nwa-+Bx|NovcqbG6rbbMA?!abbw|%&N%_Z+?i}k69+Z52E=oIPK#P?cMy*= zGPmDRo6{w=Zb4=X)Q+K^(W76;`e8$D0ST88#T#l}ZCH3%+5Z3txD@yM!;@&#O?rK< zX)1Ls>E}{Jah|7{u1S$a$8kTBx$N%z>*Y%eDJ?3@y}49*V6|^?G+gb7IFa397R9OH z++1_LoD8$hI5)yUEJgS91+JHa`MvnGn+tQFTTM|hqX1)Kru~xxE`Tpjxftf6j1FNe zw4WcuBaTH5vjlLFq39Yv9mN@ofNi<+&3>gt^qBX3Sj}wXTIh%nl=YhJn1i zD|uM4fDi!?G>OG^qyCzyzF}>;>{NUcR7gK~4`n~3579o{4}I%kNZ)%)tebu>mC>Qt zSdcw|?1m&H2O|qzV$HG^#jwX={Q}P-u$r8P^@XYhPz#(zwFOJPy}&`NK2CU+oXt1W zEI9uVmE#&9G(V*QS*O?hzyly&{@rEtpt!5#4fh7U;oYFO0G1`_m~w|Mkp?xOx=LOL z-*6jLv@!B4NNG0I?59nPcaU2Uok&pgG-d9CThNeuMtnQAJ#PBmZEldBfIU_~~`j4TpJlD5t1=3@6(1KSp zT4-Ha#jUSaVXd#M1PLE2TZ~Yw>lr!1T*^bLE)}U-lf4%&@#!^W-snjzlLMYdNWV#s zGhyynYsFp?VIELO-?@olv1}MO9}kKcFX39`L!qf0YA+cgfd-88k)^yIKg)56O#ZXC zO1piYnPs!p3Gb!{GEcF>B3~oa5MZv!&L-S=9*BFe2upLxyI`*R z(^$Yuq4P_q%-sN>&~++)2uX{G9n;jbZ(oN6W;b(Wk62`T0}gOca*NeA8pYdYCi4oj sC}KC;?uQ-i(h|%gQCOG@ZV!fR+~)>YoZJL=fS9qcwpsmA@LEv%57FZTod5s; literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03a12293885e9e925c33e1e981259b4762108b21 GIT binary patch literal 6160 zcmb7I&2!tv702QmAVo`*Ecq*r(==_FjIB8RO51!mabm}f%(k(Ub}()c7zlSRkF1=RH4Xt=^ztaO;iSlL%S6*M*0I=ZJznufmN znds}>h^>z8*^+L?PRI2svTvbZ_2y)s#kCGo6{d8Kc=M8C#|xdhS7(Z&TvoWl-Fpgm zBeuD?r+G`DRCpDX>X332lsR4lr8cB2gK~t=gEBv)tbnq>>!8$!lw+VQ@+DA~hLq!= z9OcWPEDtFsIJ>4aRvu#CO0&TVv)R1ZO>U#-ydPxYMzbqA4;cnQITm(1z3nXW1HKuA 
zQ4;R>ogfqKyBKpn2(nW5_w&Qrm$Lf+O%p&fu1v`-lM^n*^Ch8n2(bC_e$&`%c z%AUHf4j}wY+t&w5UmO1GnL(f1Q$J9C{dV8TwS5yKR;F#*Im=aA!hPa(a&^G)?+!2n z#?;wk19Jd_$@Pg@RBnuC;l_XsFcWyRT=6Tpj<&kB#1(AMYzh+c!y1}P4NVj{D5M;uI?fdPdo%w#DCQ0F5?egt7T5G3SVTd4UMTHskdQrj)_gb{I9VJ;i zhzoPAokW2sDhDb(&kOyNXzkO&TF-iZ9rFVC)~Syu$DXf@#NY$v$2x*owA0;p;=72)c3){fIvX%Odn zJK_lDSwc=CD=2)F11WlBRyGKQ>L9>Hpe2J<}A63Fq!w91| z_Wg+koyMr3RY_~6O{WA}TdA`7ladzVajJ{}E*y~I2i!|-k!IuvZij@KQK3GuNbtXf z&XeGWq9o*@cqV~q0M;fi;cX3+0RhvNfHay?We;h#OQMRl;Rr&fc%2%uUNKJ%*>GXf zYTn=z0DFUr6&jfjF%pT)4>tlht~hNlsYoFuafW7);-CrNLrn?gHMR_;n9TipcX7Pi z+4fDgPK&%qFVg~ZWgk{NV11UuwLf6+>f(E#H`MaO-$8#uBl7E{w2x39k;icX!rl7B zcZXIWUYmY%tVmY%NB zs!BcSYK@ty`_=B^cqg;YqfZe^~^*4GBGeZAXMg$s$|o6O`#a>C+FZ zjFbi6sB?CoA|Ik}uI?c^`=(Tw+I+w~J!rKAT%82kPcT!dGP3c)y4-BaNHs^DgGg4W zHh8%WH$2uX;bc^3 z=TxO;2}vaaX}1H7BFjD^3-G9C>OS^QJ;*{|LEJC3u8X(vmUtIU!7dfLh~n+xmZS|7 zZp2#=;y#b=7LM!(8Js=am}MB7m?pK+)R>E(t?r(fv6JT-3Q3=gVr(d;7|}o;m&1lq zA@ww)>`qtQ3JJucLWrR%E#xog9JUadomsF8iouNRBWFtr`f6?=tFp*~GCMcvA9OW0 z_8?8ov(!p?8Ug zmuo4r!t$#YY_}@2xH+=m2M8GygJj$&^iC8cg2GUvR+=;U*9i}_fX6~>Cz)8J0cq^g zNl}C@v`%nWNV7LPJANxn3Z3_|jfPwL@C)<`g^R*%i96YbPia!@mu#l$^QD!n7(ot&v-+GFNrkX@sFO8eAOPU7Aqr7zwnhK8iD1)mj$}G;UQ%@bUatoT! zCN$A7OF#M{mLo1vLsBU-uO@m8yYwA8$C2d_EAm|E=NH8PG&lTP49cWx8Odv%&FeN> zQfLc<&G9fx8h(;isDu)5LF!32UiFF)`C=VJH%3Tg%_g6g3 zgq<@JcFz9=J9h$hl_}U&B(*HDiT2*nj_ zurXgE?w9BvLY(-RXfqHe)@Xo?xwH>54RK&Ijp`xXiJ#H<*1?SQ?ULfQ?*+F79c zBk1WFG|vK^35(i2@q~J3gO7km^&)B-qXEny8vszHV~srED4QyvhQq9cTIntq`Wg-| z1huDdZ!JD^T+emL{Te=qm?g0_mIL`RAyHOF=n`L_67`v=`ZG9EO0RXcEHQNqm|7-G z9e+|9&qWs5!5`?GpHV|kPhe^k&(m*VbaWxXO?f2&pMTH6l?1MOrmiHcqH=}MR^}Gh z!_gd=K^{a7K=&UIra>of05rrFc1snfxQg6B$I*mt9N-%>b03G1-0W$+l_Imb=O z<8X`HQ#@|603HWC?{XJBmCec%^SD`cPwAW49I`KyS8z(TvKp`QIb>kk4Fb>+)cG9O zE|`aB3*f5BAFh~SI2hfW=L`4kK1E-}ujlaJ{vzO~_)EjLvfReg(QFy*a<+nYMdo$& zrf2ggNuwmn+PKY8t~Z$H_C%D40Jjp{GewljF@VpTyK?ot%O5sByng;;|FiQrDO-1< zc55R`3;S+}=+^EgZ8%5Dlqte_bi7*MPC*`CMYWIr_wi#Hg2hk4@Q{=O7k>eNGGIO% zxQ%0_=lB(|(~+#uT1g@(u6dQwF3!JkULO4l`wB=I?m9eIUW2sP+mZ0>azE$aEY$T- zcr+88VC3Oe5J1OnMcN{6UXt4~v8~+c-$Y4EHR|)CAQafaJ!$MW_6MO8xRWhCV00K@BBp zFhuE(U>;H$pVNR8$t%?RHZ@XeB%#8Ay(Vd|n?}eeNY3(I&uMpB{&(rP=vl3x=i?H_ zv*n!{?&L&+_|6a1xMp8j=ptneLD#WsknJ`$9+MHE-I4icAFq8aaEXCxQT*6sS)dcD4Y`sVWM{{vLz B0lEMH literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-37.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30734fdaf9ed09901944f2c7a6b049702867c0c3 GIT binary patch literal 6431 zcmd^D&2!{N6<13djb?1m+GDTxD_N3|giIjoc*944By6&YHv#Vei6N9~DV0_&+vC;9 z-j=*;yEI(9RJiPgzra2KMOFR_PE>K?09CreWp5lQimLGYwdTVg6N=3q(Ue-P*H1m& z@4er9uiraUQ*{H^AAj@r@XZ$u<8OQ!TsAt_aHs!3BMm8Hqboci%D(BDpqa7NwLM$Y zEc7d$gT5^*akcAuuBJP2ty}jdbia!Jq&KDeB5rhrX$Yfx#+%j@H=gM>y{0g37_uho z4-8q4#K!Et<;{UIAtynZ>{HHyG9??JH2RczP|nC{P^SBobD+$~CMeB5+Ma|eIzgl%K31!yS1G~ zejqo3FiOH*zZ+z#vxBkvyFnH#Zw4|_c(eJ2r_Lqxc;+X@ zs*pmO4_wcZmb4!jp8bho8(sygs}yIKw&Qr$$I6l@%p&P82fL992U9tiX$m*r(@)W4 zMkaD&-#jo6A@a;Run&!%)qmPMzsf57vwibjY^TkJn1gH^RzvJPJzk>yFJTF=wj}6Au(rDILv^q<^e27)?sgEDBU+J)I4-98 z<`714?E9kywS>Baam96ME1pFsrE$Ar@{*>6co|*;NI#Bus4sO?n$bJl3yCCNd=fO; zCA@*oX_o*cYBvD05E`r%K;4lg=KTez+dMnGs4_%>y|Nm8hqD8=s^u8i2%_2Q>C(M|(YO z{X_wmcpZ1j)#SzjEc{UPL{2v^;NjI*L2sG*Pt~*Nk91WkiRyeGw^ouN>SC{XN zCF{Y1Ld8j3Sex6~KnBN^`w>^xSFQ=qXYS38_HkOd+z9XM{Op)=5tatZeFcO4C6vm& z0>VfcBMsl;EQDdH;Uj|kw8!R2a5({Gk~CZdY! 
zjdokd$tiY*akMaRC#_29q}{%{)1rPmNF%NK)T_ZD={lphsB(V4E61CHqDxFeRVM?(hk^nOF9q0;GUuPwcE@H3?Pd1aztAa+ciEU)QpFuG19ESMbgmM9vJn0w$$FQd`vqXkc6U4|2#7Lz8dPh1wR^^A$c zaqft6M-bvNdC~h0A+(2=Tx{@0xZE%;!$k7f1C%6FL}SCsD@Yi%E7`Yt=FXK2BcNBb z9xo#itVr{mF^>1{cc7oo8?EM5K+-CY0a_a^r}W;hV@c}UY$%s9dAx>R%PrlGEpg5| z!HNVO8lqEBpXdC4jlnW6H0O~SHtiu$p3=Ti>DMiLh2ycQNu%|O1uD*JjCJH%|jef2}}xL64^ZH<>CG^ zuxM*cj^VxgJTUorqfYLe#H4i+gyjM41pL$z7Wd!b_Xo;_2SFaoj)CvOX-Pa81ZfPv z<=+h<}(MmoA8gXr;~hKdYoT*oRh)neiU0Hv?JaN&d64tts zc06k8@`r^8CGME&b!j3sv2g}txdk3Ja}IFo$}7H`EP2nLhWQau;vSY!>ZoHivdDJ1eK<4AMyJ77q;bxX>)}Fqv|-2dOU2dZl{=~U#RO``1fa@wa%B({e4cx=w4WBNusDXZ(^|a z#jA_@JnOk1fRv%ASk5Ma|g2)^>iynQEV$f z9lwR&((FeD=_;8 z2#Oo_nwsXXMVnoqwG(obnpE^=>MB|6ZYK@3MS625xpgW=ZL1%%q3#t=!rtuQzpkuq zCm}H%#9n>WSKlJngyy=}AzDi;IyZIEP1ndubnz%%rrkO-C(iVSj)-^|feZJIvS{}f cb9^0!X~DGZ^4AoT_2w(hW^)GjV)NDi0QJEjiU0rr literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5e5353b42d2d15592a4692620f29e968be63c3c GIT binary patch literal 5565 zcmbVQ%ahwk8COfPUiLT>kMr6`2+KlYU^AO6kFabl%O*^AsaXR_;2=V&G-`Q9jx2jx za(2cgbD2LZHP^sc z=e5`zSgs}MMr;opw=Tyf#tnBxj#=CsFjZm7;FP;6DOP-X&~jT$xv6lQI}a4@gsi`I zpt)^O>bwC;qoSMvWra6EX;zf8pq%2XpsZGubD*5&El^q&w z-yHfp6u8sr4*$kr$ip}dNc!TXRX6GD7=((;xZ}Y>eZ0-^1#XFvvpgZTkD625SGS#Yo zJ*&~{4%8dUZzt1Qt{obfF*B`in_R*6jIN+Ve%wU2@rb$lR^+FGRIsyB)Y9=N6vD<`VR>GZ zM49ImYLXPrM?*f2!_6qoikk3~?XWQX(I`xK;oJ^)$6=C1eq0!vQ4;#Xtsg;pg%|p* zaPwD%*~^3%?~B3=;*#rxd6pu)0kAQ@f6q?x5RN zrYf42!MZC&cy|g|`YhKkC^*F>&JW(tzS1ovWqU&5GX;w8+u8bB>H%!yY?s_fw66tE4&$G+g}xJg^+l=6&m)@__|Lu<0h ztIJQcdh(G$Dwz0#5Gv?+9w3AQV%+e&-LW5+GZIkob$aDWr6WNY$DX&apqDW#XjM|N z*Dy#)-!0a7SyBSroVeze{vuAp*@#4%k<+*zkb%uF8k0RqPw@^0&w2_#Dg6UfgGno{ z!3P+;^abWjnUNJ+(t2xM3F>2#wjt{1JGN-xPkfs?(r!VPE$GNygI05uFIeMME=c=} z)c`Z0$h=_3hnI`f4wFy|c~(a-N`XJY1F4K)tf`$PBl%*ocD&o=7C_cOi@Z(`(~5HC z5PCjiQVFkZk+!bPxtfF|Kmn8PNmh4)r^BEE6*$z!zXNZy-2 z4zx{qNC7A(E?OpCFNAg=Lbw=j5RXp81(HREPygwg=$4}`y*?a)^!f%6R)Mf75!SkQ zN?ffHS2Ccda22G&u71Z)b_$IgsV0xmg&zf;NnB{dakdP}+q9|*Nfi!HHU2wUJKo81 zBxx(uuG7OOkVJU8t%%E@154U$wGbh(yaK{9K<|3R`BTerg0yD9w9jx=F!b6*6tQjF|rOhKPyr?lm#fh9=cg>}vzQn*FP&(#B@ z1fC&bRl5{x{q1c5rz#EhDuk54lC2cx&2CqQ#}yhJh0j9WN;oUM6$udy~37bKSNky zJImCk4>75Q4*P&daMa05OLp?5szj$xiZhP{hL}@G^wIVIs8XE(?1tILLvb%4XE49W z@~BB2`YQ%c>yS(uO=u3~1B^5%XKe*zH8+v$nWSTxl^gUAx|-JxaNbDsRB+oIr!G^> zjj2U4a!o#KXHG724)8d+P1(oPK9Xlw^6V4x>?zy*wdASOv&6&8wUo(f`BoF!-H^%U z$~-txw3894&lV0OMo2nV;pw=XY7m?G|xOz@m#<_d#xpQW2A`8^YBwr%YF-m{@BP>Ty z0wPbP%mT@#J67pkbdE~uDOOhtfX@a!^gMU|2TYa;o_ZE3UQ1s^LfzIa)>0>HiwE|l zCQ14+CTUf2_KF}18R>~@?weAI`}2y98?vT@c0RC?@1ia~QPY`4{llae!Z(tiyc5i2 zkYGyA2sde!|Hj~?Rmu`dgB7rQs<_5g_&FWv8kKB7F*6R4pyf5sN;be#1J7MqD4Uu138l9;4F}lYzg3cXTsJKB>Kc()- zFkQ+&d*c*Rs=_|H@Si77I_FDI@c}WDrzo`TBoTy;Tc5A+%GE0}#dWO@LCR1Ax{j<3 zqh1sW*DBX~<(jMZ0`W7FBvWI8St=Cj&*Z+0w9PF)xVJS-kjBaIY4-vzrcysGY`nLk zHjlqIc+ls7u0>ZtD-@r#54Eq4uRCl4kH3nE(_N<^~1TQP>)p!S!74n;hk=bYO2nag9SLVc$lG zKyJ&G-_wUI^gRu3Pw6{ob(F7=&y?LR?ix_%N(MI|*~(V3W^Nr&IE19bQ@Mpz*$GNy zgfJV9&P28>RqrdMCKAb@>S|q(cwGD zj*kx^GBDjD>X9_@OPbe{a4#$LD8t7R0&pId=*_Mj4Ys|vC=qwfZGYsUL>6BoA+OV{ zMQ)ZVY3WuABbHyXT$}3aUX=LpvZUwFcrvAen{sA{5+AjR2-Rkn)i$XG#*?*0$SY^t zVRxQi9+8#(kp$BLTC90V5T)~yYsoJHmtlEL5X2>PZcEakWYUgs6g5Ucu{ZsXWyNdE2A@QFM7(B(MUHf+miJ}Bh$>$qslT-)fn?PmGqcDD=Ex{jWg+ZE<;V992iISp!`uC|+<4c94G*zHQyX;rt% z&5FzG_t0k$56X1CwdQd2IVH^>ae1ZQDg(BA!-%-7S&_n^{sL;jej{2(Rjd|n3V9JKdUw75pniyj`Gw!3krg#&s 
z&Jtqmw!soJ%8qtjdF_z5M@;M{+@u&2dmvcK*GPl`*Yp`hWJL~Rq{K+CZw>1S#$(C* z#=EMjynfC##YAsRmcmSvZrZiPVpnv5aYa(3)>5x1BF#*2 zeE%ak%mGfL$1BXWGz2%j>g)Ap&DUx!*YK7~s;#qnlS@H1eD$7hISgV@Z!zbdpO*EC z>vCV+f>c?zDsFYX+^ugrzOe>5FOB$`+x8P~n^)I;y;^bo1f;Cy_=)a%rQ`U?N~hzr zm~WC?mZ1Z7dZX2SeZz4uM7}TJ)a4s3=bq~ub=Tp13X8MT+YWD+YrMj2^H!yL=Wc~F z-{`tdr!?ayyOn0A;rOX)YqQ+g;(ne;ZnaxZS*nVkUaOWHqg%4sF_cFRvPXR}^)_#P~ZPZAg-FfP}K?JO@>*INxcRjYK$ zYc97_0Ody4we=3i(iuHFP+z^{CmT|s?KFDIYxP#8;TsL;`}Y*Sg5f?`o`(q_O`NZ= zy1P}mGr#7*06-7fe3w_}H{5!oJ0B>ttj~3}dJ0aDF5eJsUiSuoshg^$St^xT)l~DU zp-rgIs$=*|qQ}rI%`{B($UnnO2DLHCfx2d@lR7_-p{k_Vy&vsC0sm6b-y}tcf=B_@ z!cq@PR}~2;O4ikG>$eS1CEk@rfld@CbHdAoJ=!BZ`LIWSqzCFJ)#+tngtG*r8;?*H zSi&2{{}{{y)&Vo|B^Z`*Z$gZ+boX0chh1N z{iA!8cQuw~)_n~oWK^W?B={exN=tqHrd+4O|0G!Bnhrw{u6E6UT4z>ETJ5j@ihloc zxpw;NlYjezAO7p*+Qn}z{KL1mo_nVD@9Xb<GFA^TObfAshxA%E>13~5+FH6y{8GBA#fJJ*71Ne za;wZYS~%&tU0<#9W7NM)fOIkd=TP$t!5Z+;Ww%{M@e^q6J_|q^hCgFAF{{o>ZJLFy z8EQe+HRzkAPJgH+NbklzH1&cu4IML7?V$<%%xaueshR=k{YFvA;a}?5U!uZPFXQ4M z-N9i6y;5)p$zwsNOcxm(6BfxUB+r19CcLDV0<|HjwTPCElYlM_t%M9dX0#+riOhr& zQN~BqcMWJAv>1nY&Lu1nVtI)rLu^E1sSqpB0UT)6S_1NFuGar90r@(5gVRY+tTo!V zDvjc`)zvda7)9r-yU}urI1`F%D#Sn)G_=ty=Mml|~&l6_&a9IfqNhfulpD z+eZRH#hW?nw$Gv{q8oAx52pAE%sCNpa#J42(=W*{qb~QJlYR4zY8&>{WRBC3HLL5A z3&iSGw=^nc)3-=k0x9&ZO~_X$kx~L5Be;Em4CM0)&0}`BvsnhWpQPjKuIp!_(+fxf zM;D4D9b0$ZVJXa!>lH{M2)=>wx}+wCM&=qOUd?`JCJHExho)gc1gRyT$fFN`8vh)K zt0qA2M`A{zCq-=?6^NSYh5`nYMnaR?NqOnM*r64n0tAkF5CXW`t`5O4q+nPQON3ZP zV#yH81}0TxrQy<6>$@XxLlScPgBL*u7k{3>3jn2DU=m)WhR+b7jrPs()L)F5390_H zz5FErJJTbfE&MTRUL`Q3(&=b3zl+{3xwD2kq0Q=fowh5h^3S8GngZzkh=#ULZkLM+ zwu-DVG=`~6!>N|SMx{i8&NxOec|R#PG@vrjN>sF+X-Uzi!q#D&B%H|%T-+4u+K!Gh zE9>R3**J?xlR_N|yf!%dZmYc6DA(D&o6>gQ_?}^>y6YQjYYoTFz}}WA>VWmNtq$UV zrqgWmEj!J4y8{OaZb|{s$l6913e}4w44%Yuae7KlkIpTCQXbA1l*mt!&(m?&{dBj} zsJrD>XPc7@aoQQ*xK-|MOIZ(HA1Dl`*B7FY`u>ZIX~)ou8LzkMZXK$=?F7OO)&FCR zB45!M+!*?|w1S$~`A?y}Y5??pv~a@ZE4q$~tL(yL?rKs#4T-5CmXMehVo8bVA(rYH zPARSGYDwoq;%Pqau;MRc%9zZ3me8T+n~pu7bXa>K)}Mp8OZ%PI0+q$-U<0kD0eU~G zM=E<5NbpEfReR@x$<%jD0iS}RYgk=vwPgBZp;napmd&4uY)`O|Sl2}4M8_HO5QBws z->D{iaR^@;!k35el|Ek5q}KrHLKs>~`-$ega;xcQYK>csa%dk55{VWH>uAZAz*1(S z1(7L@Rudw@QbHtHM~DQAc+|v>(Mp1dq4!$;2AkZS1~4>D9sD!i*V5(>c@v#Csyblr zM`9RRia!7Xqp8B{8)NV#UF9}TZg|0}XOM}6XKa*>))CNik zImb-Uq;97k87JwQJBpWZ(=gBokiDeP_-9>=-Juu>Cg>Kj9ld&u-*wl^6y5j;j|78mjk{B=O}zD5cL)uQ%(lL zPSsl-9Q4iLK#>ReNn$DycvN?xBbc3>g9>6_SjDQl!JWScarYT7MYdMh( zYgwdOf?mW>%!q6P+F0U#>eU2AFWx96SkMp2fQ*pM==V<`%9-@0ylM2O9@9VL&3b#i zecpcbn~&)~;2rde-XZTW`qPi;KjIzrj(Nwu6E1nK(l^{O@N_4=l6T5G?ZU$pr}1=W zqT1Q0HW$_AquRoor*V>6cXa-zZgMvz(%@Ybi!6hr(@B(BlqHlol&4VUQJz6Lg7PfN z0?Ij*qbTQ5j-gy&hPQ<14B9~nrnlACu6oOEnvEkRw7jDthMSmD$a}`S$cU4aLc$x! 
zh?1=3P&*}SdDPBO4ZM4#x1GhQPVwbv@7WRijSF)pgEzC{o%7CnPhfQ)KkC_VRHULX zI*mFZ_K3-7)F~u?f>AGcPkI-Qpr9jF=UBqfz&X{o$yWyr;cqyvx|R zj~{hD9CbPx^?*1iiqWWt#Nlw%XT2-lRqq|Vut0dHq7-s}tXW@$9>f>^p^ysh&0#0%b%u-LvS z1^r9%&F`1>Kne~Zs(;bVi5JC5Q4*)b8F5z3iFvWGla}xNpj=yCoD)xqr^OZV99ER{ zPP%z7T3GtM;EUt;?Wy>kFM5D=;&%FK@@+2>=@)nH5O}Hau%s4rr z(u##HVGwChh{K-)cpyz+htN#|Un1}-f!7H9G=VaKpCM2oa0_62;z;aQe6G{3u3s!} zPZW#ASbI?YAZrz^;wvxSe6F}qv|`xe0Jb!MEe~KTMe6}s=@5jm==+i3bWO4MJ?Gq$b>yb>r&+ZN@SR6sXWMpodn%-oN4)ynU;r` zuzz4$i8HM{!n6`F`KiigjW&ieY?&Rn=#NiR1j#aom+#i$O9VjM#hWG#={s5}}I zzJEj1vsUnxJNwhQi?yjYsu`x35n%G?4-o< zAyyioJT*XhdVuoG0Oi>M%DDl``2os>0m{VzO1QhRbuJH3t_)D(t~5q@et`0c0m=(3 zc&pah& zhGxF(o%QCt`Do^efteSEW?mecdFd0+ycaI{;4?q#EqIIGQZ#cAc=UFAd1&U9p_$Kp z;+YSMLqjuP@s=rwiDnLhlitkdhh~0aXyyx_c;+K;^9P^#stlYi1T(|2z8t#vS5RJs zn?D6N{~Da@C*juXd_r7Z2BF9qHJbLrpsjeDM;a&uzfbxF5UaEeQ&IN zK`e@;upPkyrQPH>%LqBnh3yC&qW1G}_b-I)2qI*A0h(}DEQnAilG^0)v*#aW3^<_jonT(5+msdV`(Xb4l<{3hx$nBjM+M!`&d=0^`7 zVsPRMBA$Cx+aj<{;0*$TfCo_0?eQ@Axfu8Dqd;hTkBl^AS~2_%29d`4_M~ivi{vcy zl7U0*iyzUx^bzgLAJM)t*e<^hC_l2Esi6JQ1;siBR}||QTvDuKa80p}fkojKjIvfi zP(=Y0e~ZAE3Bb{bUm4sWG`h#>hgh6$Hs!4fuTo7$RM-71IhI|x;B2Ge{Lk?f{|fQ# z5Lh7aZ2;etK~%+E=RZzuCkV&{-e4{s_a5{)2EvL3{#DR~I6T?~{OsC@cY2!^_CrEyCZ2uf_jbm zD2TBOc<~F;S;to|c+W2XMe6&dUf<$C-!D_&uk`wYaH7ZetJL?MUSAM3^!k2{`hLCF z7X$~rzTcp}-z4x01l}g95x3ciPaHJF;uSkh6hXLxuU~@e&jtDrCa5m`!1WP=1~~AKFT?(<9$uTu zyTIT^@KIE=(K@YDK+?O2F#aoP7(X(S!KK8f!%VXu z-c0KMH)9Tu!haXCaH1yAisABjbQgjngU3(mjuR3mv0a*Lm*T0coiSBb$vE8hqyjNHK3 z#>-c54jsOE30s3h^BGd9?o|L&q3`|ZmZ|qgIQ>zoL`z!_wJX*`bJ5&y=FNhZ)l}=D SmACdOZ&_*OE#<9j=Dz_z+27{? literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c166ef2fdbf365a3db114991e6681128de528b67 GIT binary patch literal 476 zcmZuty-ve05VrHzG_*xXh=DiA(u^#KR;q-UI+RWsKvwJqLlQ@}i_|^>5)V)&I`T@M zkYMS?3_{}EqSAr0?$h1(`F?ECZbt|#et6FoE<$fM*)^;7!Bqu75yb^6af&fw_}Kw> zsk6ik-+I)g9`zqB=P#`qtg%6Z99v&Hxxd6|xUF+--9NX0Pn#59qPVewnP|wbFhaZ! z5IgzP!rH2rTM40$UW|>eGE%T#=Cv0`=12>f-Q07Wc zF~#T19E?asMY+l7K#JNNZm0_(`CL?*6z2u6O*j@~Rw*G(a|mro0oCa~wC2_Ic7W+< zru1L_f@BL8Uc=`}qF_UkvxHEtXXGxKFv=v+j3!EE$yD<~CEH#^!#h;Bo|*8p6m(j! bGx!apjsaT8^FrLh0h|sFzP!C~9YxCi literal 0 HcmV?d00001 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py new file mode 100644 index 00000000000..a694c83ea06 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py @@ -0,0 +1,53 @@ +from torch.utils import data +from PIL import Image +import torch +import numpy as np + +################ Dataloader ######################### + +class construct_dataset(data.Dataset): + def __init__(self, data_pth, split_npz, site, transforms, tn_vl_idx): + # site [0,4] or -999 which means global + # tn_vl_idx 0=> Train 1=> Val 2=> Test + # load the npz file + a=np.load(split_npz, allow_pickle=True) + img_names=a['img_names'] + gt=a['gt'] + clstr_assgn=a['clstr_assgn'] + trn_val_tst=a['trn_val_tst'] + del a + if site==-999: + # This means that we are performing the global baseline experiments where data of all sites is combined. 
+ idx=np.where(trn_val_tst==tn_vl_idx)[0] + else: + # Select only those images which belong to the site and train/val/test + idx=np.where((clstr_assgn==site) & (trn_val_tst==tn_vl_idx))[0] + img_names=img_names[idx] + gt=gt[idx] + del idx + self.img_names=img_names + self.gt=gt + self.transforms=transforms + self.data_pth=data_pth + + def __getitem__(self, index): + """Take the index of item and returns the image and its labels""" + img_nm=self.img_names[index] + # Read the image + image = Image.open(self.data_pth+img_nm) + # Apply the transforms + image=self.transforms(image) + # Convert datatype/Tensor + gt=self.gt[index] + gt=torch.FloatTensor(gt) + if image.shape[0]==4: + image = image[:1,:,:] + else: + pass + sample={'img': image, 'gt': gt, 'img_nm': img_nm} + return sample + + def __len__(self): + #print(self.img_pths.shape[0]) + return self.img_names.shape[0] + diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/downloader.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/downloader.py new file mode 100644 index 00000000000..9c0eee36cc8 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/downloader.py @@ -0,0 +1,33 @@ +from .get_config import get_config +import os +import zipfile +import wget + +def download_and_extract(path, url, expath): + wget.download(url, path) + with zipfile.ZipFile(path, 'r') as zip_ref: + zip_ref.extractall(expath) + +def download_checkpoint(gnn=True): + config = get_config(action='download', gnn=gnn, config_path='configs/') + if not os.path.exists('model_weights'): + os.makedirs('model_weights') + url1 = config['fl_with_gnn']['url_model'] + path1 = config['fl_with_gnn']['dest_path_model'] + url2 = config['fl_without_gnn']['url_model'] + path2 = config['fl_without_gnn']['dest_path_model'] + download_and_extract(path=path1, url=url1, + expath='model_weights/') + download_and_extract(path=path2, url=url2, + expath='model_weights/') + +def download_data(gnn=True): + config = get_config(action='download', gnn = gnn, config_path='configs/') + if not os.path.exists('data'): + os.makedirs('data') + url1 = config['data']['url_data'] + path1 = config['data']['dest_path_data'] + url2 = config['data']['url_split'] + path2 = config['data']['dest_path_split'] + download_and_extract(path=path1, url=url1, expath='data/') + download_and_extract(path=path2, url=url2, expath='data/') diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/exporter.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/exporter.py new file mode 100644 index 00000000000..b9daf9cd869 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/exporter.py @@ -0,0 +1,63 @@ +import torch +import os +from .model import Infer_model +from .misc import aggregate_local_weights +class Exporter: + def __init__(self, config, gnn=True): + self.config = config + self.checkpoint = config.get('checkpoint') + self.gnn = gnn + if self.gnn: + model = Infer_model(config.get('backbone'),config.get('split_path'),gnn=True) + else: + model = Infer_model(config.get('backbone'),config.get('split_path'),gnn=False) + self.model = model + self.model.eval() + checkpoint=torch.load(self.checkpoint) + glbl_cnv_wt=checkpoint['cnv_lyr_state_dict'] + glbl_backbone_wt=checkpoint['backbone_model_state_dict'] + glbl_fc_wt=checkpoint['fc_layers_state_dict'] + + self.model.cnv_lyr.load_state_dict(glbl_cnv_wt) + self.model.backbone_model.load_state_dict(glbl_backbone_wt) + 
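        # Illustrative note, not part of the original patch: the checkpoint loaded above is
        # assumed to be a dict written by save_model_weights() in train_utils.py, i.e. with
        # keys 'cnv_lyr_state_dict', 'backbone_model_state_dict', 'fc_layers_state_dict'
        # and, for the GNN variant, one 'sitX_gnn_model' entry per site (X in 0..4); those
        # entries are mapped onto the corresponding sub-modules of Infer_model here.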
self.model.fc_layers.load_state_dict(glbl_fc_wt) + if self.gnn: + + sit0_gnn_wt=checkpoint['sit0_gnn_model'] + sit1_gnn_wt=checkpoint['sit1_gnn_model'] + sit2_gnn_wt=checkpoint['sit2_gnn_model'] + sit3_gnn_wt=checkpoint['sit3_gnn_model'] + sit4_gnn_wt=checkpoint['sit4_gnn_model'] + glbl_gnn_wt=aggregate_local_weights(sit0_gnn_wt, sit1_gnn_wt, + sit2_gnn_wt,sit3_gnn_wt, + sit4_gnn_wt, torch.device('cpu')) + self.model.gnn_model.load_state_dict(glbl_gnn_wt) + + def export_model_ir(self): + input_model = os.path.join( + os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) + input_shape = self.config.get('input_shape') + output_dir = os.path.split(self.checkpoint)[0] + export_command = f"""mo \ + --framework onnx \ + --input_model {input_model} \ + --input_shape "{input_shape}" \ + --output_dir {output_dir}""" + + if self.config.get('verbose_export'): + print(export_command) + os.system(export_command) + + def export_model_onnx(self): + + print(f"Saving model to {self.config.get('model_name_onnx')}") + res_path = os.path.join(os.path.split(self.checkpoint)[0], self.config.get('model_name_onnx')) + + dummy_input = torch.randn(1, 1, 320, 320) + + torch.onnx.export(self.model, dummy_input, res_path, + opset_version=11, do_constant_folding=True, + input_names=['input'], output_names=['output'], + dynamic_axes={'input': {0: 'batch_size'}, + 'output': {0: 'batch_size'}}, + verbose=True) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py new file mode 100644 index 00000000000..c9d31e27ae3 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py @@ -0,0 +1,23 @@ +import os +import json + +def get_config(action, gnn=True, config_path=""): + + root_path = os.path.dirname(os.path.dirname( + os.path.dirname(os.path.realpath(__file__)))) + config_path = os.path.join(root_path, 'configs') + + if action == 'download': + with open(os.path.join(config_path, 'download_configs.json')) as f1: + config = json.load(f1) + else: + if gnn is True: + with open(os.path.join(config_path, 'fl_with_gnn.json')) as f1: + config_file = json.load(f1) + config = config_file[action] + else: + with open(os.path.join(config_path, 'fl_without_gnn.json')) as f1: + config_file = json.load(f1) + config = config_file[action] + + return config diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py new file mode 100644 index 00000000000..fc4ad0ebdfe --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py @@ -0,0 +1,213 @@ +import torch +import torch.nn.functional as F +from torch_geometric.data import Data as Data_GNN +from torch_geometric.data import DataLoader as DataLoader_GNN +import numpy as np +from .metric import compute_performance +from .model import Infer_model +from .loss import Custom_Loss +from .dataloader import construct_dataset +from torch.utils.data import DataLoader +from .transformations import test_transform +from openvino.inference_engine import IECore +import torchvision.transforms as transforms +from .misc import aggregate_local_weights +import os +import onnxruntime + +def to_numpy(tensor): + return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() + +# Validation code +################# To be used for inference during training 
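# Illustrative note, not part of the original patch: during federated training this helper
# is called once per site after the local update, roughly as
#   metric = inference(cnv_lyr, backbone_model, fc_layers, gnn_model,
#                      val_loader, criterion, device, edge_index, edge_attr)
# and the returned mean per-class AUC is the value monitored for early stopping.
# Passing gnn_model=None skips the graph head and scores the plain CNN predictions instead.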
#################### +def inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, + criterion, device,edge_index=None, edge_attr=None): + + tot_loss=0 + # tot_auc=0 + gt_lst=[] + pred_lst=[] + + cnv_lyr.eval() + backbone_model.eval() + fc_layers.eval() + if gnn_model is not None: + gnn_model.eval() + + with torch.no_grad(): + for count, sample in enumerate(val_loader): + img=sample['img'] + gt=sample['gt'] + img=img.to(device) + gt=gt.to(device) + ############################################################## + img_3chnl=cnv_lyr(img) + gap_ftr=backbone_model(img_3chnl) + ftr_lst, prd=fc_layers(gap_ftr) + if gnn_model is not None: + ftr_lst=torch.cat(ftr_lst, dim=1) + data_lst=[] + for k in range(0, ftr_lst.shape[0]): + data_lst.append(Data_GNN(x=ftr_lst[k,:,:], edge_index=edge_index, + edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) + + loader = DataLoader_GNN(data_lst, batch_size=ftr_lst.shape[0]) + loader=next(iter(loader)).to(device) + gt=loader.y + prd_final=gnn_model(loader) + else: + prd_final=prd + ########Forward Pass ############################################# + loss=criterion(prd_final, gt) + # Apply the sigmoid + prd_final=F.sigmoid(prd_final) + + gt_lst.append(gt.cpu().numpy()) + pred_lst.append(prd_final.cpu().numpy()) + tot_loss=tot_loss+loss.cpu().numpy() + del loss, gt, prd_final, prd + + gt_lst=np.concatenate(gt_lst, axis=1) + pred_lst=np.concatenate(pred_lst, axis=1) + + gt_lst=np.transpose(gt_lst) + pred_lst=np.transpose(pred_lst) + + # Now compute and display the average + count=count+1 # since it began from 0 + avg_loss=tot_loss/count + + # sens_lst, spec_lst, acc_lst, auc_lst=compute_performance(pred_lst, gt_lst) + _, _, _, auc_lst=compute_performance(pred_lst, gt_lst) + avg_auc=np.mean(auc_lst) + + print ("\n Val_Loss: {:.4f}, Avg. 
AUC: {:.4f}".format(avg_loss, avg_auc)) + metric=avg_auc # this will be monitored for Early Stopping + + cnv_lyr.train() + backbone_model.train() + fc_layers.train() + if gnn_model is not None: + gnn_model.train() + return metric + +#####To be used for inference########## +def load_inference_model(config, run_type): + if config['gnn']=='True': + gnn=True + else: + gnn=False + if run_type == 'pytorch': + model = Infer_model(config['backbone'],config['split_npz'],gnn) + checkpoint = torch.load(config['model_file'], map_location=torch.device('cpu')) + glbl_cnv_wt=checkpoint['cnv_lyr_state_dict'] + glbl_backbone_wt=checkpoint['backbone_model_state_dict'] + glbl_fc_wt=checkpoint['fc_layers_state_dict'] + + model.cnv_lyr.load_state_dict(glbl_cnv_wt) + model.backbone_model.load_state_dict(glbl_backbone_wt) + model.fc_layers.load_state_dict(glbl_fc_wt) + if gnn: + sit0_gnn_wt=checkpoint['sit0_gnn_model'] + sit1_gnn_wt=checkpoint['sit1_gnn_model'] + sit2_gnn_wt=checkpoint['sit2_gnn_model'] + sit3_gnn_wt=checkpoint['sit3_gnn_model'] + sit4_gnn_wt=checkpoint['sit4_gnn_model'] + glbl_gnn_wt=aggregate_local_weights(sit0_gnn_wt, sit1_gnn_wt, sit2_gnn_wt, + sit3_gnn_wt, sit4_gnn_wt, torch.device('cpu')) + model.gnn_model.load_state_dict(glbl_gnn_wt) + model.eval() + + elif run_type == 'onnx': + model = onnxruntime.InferenceSession(os.path.splitext(config['checkpoint'])[0] + ".onnx") + + else: + ie = IECore() + split_text = os.path.splitext(config['checkpoint'])[0] + model_xml = split_text + ".xml" + model_bin = split_text + ".bin" + model_temp = ie.read_network(model_xml, model_bin) + model = ie.load_network(network=model_temp, device_name='CPU') + + return model +def validate_model(model, config, run_type): + # GPU transfer - Only pytorch models needs to be transfered. 
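    # Illustrative note, not part of the original patch: run_type selects the backend
    # prepared by load_inference_model() above,
    #   'pytorch' -> the torch checkpoint named in config['model_file']
    #   'onnx'    -> an onnxruntime InferenceSession over <checkpoint>.onnx
    #   otherwise -> the OpenVINO IR (<checkpoint>.xml / .bin) loaded through IECore
    # A minimal call, assuming a config dict that provides the keys used below
    # (data, split_npz, model_file, checkpoint, gnn, gpu, max_samples):
    #   inference_model(config, run_type='pytorch')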
+ max_samples = config['max_samples'] + device = torch.device('cpu') + if run_type == 'pytorch': + if torch.cuda.is_available() and config['gpu'] == 'True': + device = torch.device('cuda') + model = model.cuda() + data_test=construct_dataset(config['data'], config['split_npz'], -999, test_transform, tn_vl_idx=2) + test_loader=DataLoader(data_test,batch_size=1, shuffle=False, num_workers=1, pin_memory=False) + tot_loss=0 + tot_auc=0 + + gt_lst=[] + pred_lst=[] + criterion = Custom_Loss(-999,device) + + count = 0 + with torch.no_grad(): + for count, sample in enumerate(test_loader): + count=count+1 + img=sample['img'] + gt=sample['gt'] + if torch.cuda.is_available() and config['gpu'] == 'True': + img = img.cuda() + gt = gt.cuda() + if run_type == 'pytorch': + prd_final= model(img) # forward through encoder + elif run_type == 'onnx': + ort_inputs = {model.get_inputs()[0].name: to_numpy(img)} + prd_final = model.run(None, ort_inputs) + to_tensor = transforms.ToTensor() + prd_final = np.array(prd_final) + # prd_final = np.squeeze(prd_final,axis=0) + prd_final = to_tensor(prd_final)#.unsqueeze(0) + prd_final = prd_final.squeeze(1).transpose(1,0) + gt = gt.cpu() + else: + to_tensor = transforms.ToTensor() + prd_final = model.infer(inputs={'input': img.cpu()})['output'] + prd_final = np.array(prd_final) + # prd_final = np.squeeze(prd_final,axis=0) + prd_final = to_tensor(prd_final)# + prd_final = prd_final.squeeze(0) + gt=gt.cpu() + loss=criterion(prd_final, gt) + + # Apply the sigmoid + prd_final=F.sigmoid(prd_final) + + gt_lst.append(gt.cpu().numpy()) + pred_lst.append(prd_final.cpu().numpy()) + + + tot_loss=tot_loss+loss.cpu().numpy() + + del loss, gt, prd_final + if count==max_samples: + break + + + gt_lst=np.concatenate(gt_lst, axis=1) + pred_lst=np.concatenate(pred_lst, axis=1) + + gt_lst=np.transpose(gt_lst) + pred_lst=np.transpose(pred_lst) + + # Now compute and display the average + count=count+1 # since it began from 0 + avg_loss=tot_loss/count + + # sens_lst, spec_lst, acc_lst, auc_lst=compute_performance(pred_lst, gt_lst) + _, _, _, auc_lst=compute_performance(pred_lst, gt_lst) + avg_auc=np.mean(auc_lst) + + + print ("\n Test_Loss: {:.4f}, Avg. AUC: {:.4f}".format(avg_loss, avg_auc)) + +def inference_model(config, run_type): + model = load_inference_model(config, run_type) + validate_model(model, config, run_type) \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py new file mode 100644 index 00000000000..9b4e7512e58 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py @@ -0,0 +1,75 @@ +from torch import nn +import torch +import numpy as np + +############# Define the Weighted Loss. 
The weights are different for each class ######## +class Custom_Loss(nn.Module): + def __init__(self, site, device=torch.device('cpu')): + super(Custom_Loss, self).__init__() + if site==-999: + wts_pos = np.array([ 6.07201409, 12.57545272, 5.07639982, 1.29352719, 14.83679525, + 2.61834939, 9.25154963, 22.75312856, 4.12082252, 7.02592567, + 1.58836049, 38.86513797, 15.04438092, 1.17096019]) + + wts_neg = np.array([0.68019345, 0.64294624, 0.69547317, 1.16038896, 0.63797481, 0.79812282, + 0.6549775, 0.62857107, 0.71829276, 0.67000328, 0.99474773, 0.62145383, + 0.63759652, 1.2806393 ]) + elif site==0: + wts_pos=np.array([636.94267516, 13.93728223, 5.28038864, 5.26537489, 87.87346221, 8.61623298, + 67.56756757, 228.83295195, 20.40816327, 79.42811755, 5.70450656, 276.24309392, + 87.79631255, 5.6840789 ]) + + wts_neg=np.array([3.14594016, 4.0373047, 7.68875903, 7.72081532, 3.24612089, 4.91690432, + 3.28256303, 3.17389786, 3.69767786, 3.2589213, 6.93769946, 3.16636059, + 3.24622626, 6.96815553]) + elif site==1: + wts_pos=np.array([ 31.82686187, 649.35064935, 568.18181818, 11.06439478, 75.75757576, + 16.73920321, 11.19319454, 27.94076558, 25.4517689, 158.73015873, + 11.25999324, 387.59689922, 88.73114463, 7.74653343]) + + wts_neg= np.array([3.40901343, 3.09386795, 3.09597523, 4.26657565, 3.20965464, 3.77330013, + 4.24772747, 3.46056684, 3.50299506, 3.14011179, 4.23818606, 3.10385499, + 3.18989441, 5.11064547]) + elif site==2: + wts_pos= np.array([653.59477124, 662.25165563, 584.79532164, 4.56350112, 45.12635379, 11.55401502, + 675.67567568, 746.26865672, 14.69723692, 29.20560748, 5.70418116, 159.23566879, + 87.03220191, 5.50721445]) + + wts_neg= np.array([3.00057011, 3.00039005, 3.0021916, 8.645284, 3.19856704, 4.02819738, 3.00012, + 2.99886043, 3.74868796, 3.3271227, 6.26998558, 3.04395471, 3.09300671, 6.52656311]) + elif site==3: + wts_pos=np.array([359.71223022, 675.67567568, 515.46391753, 6.02772755, 65.40222368, 16.94053871, + 800.0, 740.74074074,19.9960008, 11.29433025, 10.53962901, 78.49293564, 87.2600349, + 7.8486775 ]) + + wts_neg=np.array([3.18775901, 3.17460317, 3.17924588, 6.64098818, 3.32016335, 3.88424937, 3.1722869, + 3.17329356, 3.75276767, 4.38711942, 4.51263538, 3.29228946, 3.27847354, 5.28904638]) + elif site==4: + wts_pos=np.array([7.84990973, 308.64197531, 454.54545455, 9.28074246, 186.21973929, 16.51800463, + 819.67213115, 909.09090909, 27.52546105, 1515.15151515, 10.49538203, 1960.78431373, + 47.93863854, 4.16684029]) + + wts_neg=np.array([ 4.71720364, 2.97495091, 2.96577496, 4.31723007, 2.99392234, 3.58628604, + 2.95718003, 2.95613102, 3.29978551, 2.95229098, 4.09668169, 2.95098415, + 3.13952028, 10.06137438]) + + wts_pos = torch.from_numpy(wts_pos) + wts_pos = wts_pos.type(torch.Tensor) + wts_pos=wts_pos.to(device) # size 1 by cls + + wts_neg = torch.from_numpy(wts_neg) + wts_neg = wts_neg.type(torch.Tensor) + wts_neg=wts_neg.to(device) # size 1 by cls + + self.wts_pos=wts_pos + self.wts_neg=wts_neg + self.bce=nn.BCEWithLogitsLoss(reduction='none') + + def forward(self, ypred, ytrue): + msk = ((1-ytrue)*self.wts_neg) + (ytrue*self.wts_pos) #1 if ytrue is 0 + loss=self.bce(ypred,ytrue) # bsz, cls + loss=loss*msk + loss=loss.view(-1) # flatten all batches and class losses + loss=torch.mean(loss) + return loss + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py new file mode 100644 index 00000000000..0a23bb9d55d --- /dev/null +++ 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py @@ -0,0 +1,41 @@ +from sklearn.metrics import roc_auc_score +import numpy as np +def compute_performance(pred, gt): + #This function computes the performance metrics : Accuracy, AUC, Sensitivity and Specificity + acc_lst=[] + auc_lst=[] + sens_lst=[] + spec_lst=[] + pred_scr=pred.copy() + pred_cls=pred.copy() + idx0=np.where(pred_cls<0.5) + idx1=np.where(pred_cls>=0.5) + pred_cls[idx0]=0 + pred_cls[idx1]=1 + + for cls in range(0, pred_scr.shape[1]): + tmp_prd_scr=pred_scr[:,cls] + tmp_prd_cls=pred_cls[:, cls] + tmp_gt=gt[:, cls] + + TP=np.where((tmp_gt==1) & (tmp_prd_cls==1))[0].shape[0] + TN=np.where((tmp_gt==0) & (tmp_prd_cls==0))[0].shape[0] + FP=np.where((tmp_gt==0) & (tmp_prd_cls==1))[0].shape[0] + FN=np.where((tmp_gt==1) & (tmp_prd_cls==0))[0].shape[0] + + acc=(TP+TN)/(TP+TN+FP+FN) + sens=TP/(TP+FN) + spec=TN/(TN+FP) + auc=roc_auc_score(tmp_gt, tmp_prd_scr) + + sens_lst.append(sens) + spec_lst.append(spec) + acc_lst.append(acc) + auc_lst.append(auc) + + sens_lst=np.array(sens_lst) + spec_lst=np.array(spec_lst) + acc_lst=np.array(acc_lst) + auc_lst=np.array(auc_lst) + return sens_lst, spec_lst, acc_lst, auc_lst + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py new file mode 100644 index 00000000000..223d44abda7 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py @@ -0,0 +1,123 @@ +import numpy as np +from sklearn.metrics import cohen_kappa_score +import torch +import copy +def compute_edge_attr(A): + edge=[] + edge_attr=[] + for j in range(0,14): + for k in range(0,14): + if j==k: + continue + edge.append(np.array([j,k])) + edge_attr.append(A[j,k]) + + edge=np.array(edge) + edge_attr=np.array(edge_attr) + + edge=torch.from_numpy(np.transpose(edge)) + edge=edge.long() + + edge_attr=torch.from_numpy(edge_attr) + edge_attr=torch.unsqueeze(edge_attr, dim=1) + edge_attr=edge_attr.float() + + return edge, edge_attr + +def compute_adjacency_matrix(adj_type, site, split_npz='/storage/aneesh/split.npz'): + # load the npz file + a=np.load(split_npz, allow_pickle=True) + gt=a['gt'] + clstr_assgn=a['clstr_assgn'] + trn_val_tst=a['trn_val_tst'] + del a + + if site==-999: + idx=np.where(trn_val_tst==0)[0] + else: + idx=np.where((clstr_assgn==site) & (trn_val_tst==0))[0] + gt=gt[idx] + + kappa=np.zeros((14,14)) + TP=np.zeros((14,14)) + TN=np.zeros((14,14)) + FP=np.zeros((14,14)) + FN=np.zeros((14,14)) + kappa=np.zeros((14,14)) + agree=np.zeros((14,14)) + + for j in range(0,14): + gt_j=gt[j] + for k in range(0, 14): + gt_k=gt[k] + + ## Kappa and agree are symmetric ie., A(i,j)=A(j,i) + kappa[j,k]=cohen_kappa_score(gt_j, gt_k) + agree[j,k]=(np.where(gt_j==gt_k)[0].shape[0])/gt.shape[0] + + # How many times are both j and k =1---> This will be symmetric + TP[j,k]=(np.where((gt_j==1) & (gt_k==1))[0].shape[0])/gt.shape[0] + # How many times are both j and k=0 ---> This will be symmetric + TN[j,k]=(np.where((gt_j==0) & (gt_k==0))[0].shape[0])/gt.shape[0] + + ####### FP and FN will get reversed for A(i,j) and A(j,i) + # How many time k is 1 but j is 0 + FP[j,k]=(np.where((gt_j==0) & (gt_k==1))[0].shape[0])/gt.shape[0] + # How many time k is 0 but j is 1 + FN[j,k]=(np.where((gt_j==1) & (gt_k==0))[0].shape[0])/gt.shape[0] + + if adj_type=='kappa': + A=kappa + elif adj_type=='fraction_agreement': + A=agree + elif adj_type=='confusion_matrix': + 
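        # Illustrative note, not part of the original patch: for the two scalar adjacency
        # types above, A is a 14x14 matrix and compute_edge_attr() returns edge_index of
        # shape (2, 182) with edge_attr of shape (182, 1), because the 14 label nodes give
        # 14*13 = 182 directed edges once self-loops are skipped. The 'confusion_matrix'
        # option below stacks TP/TN/FP/FN into a 14x14x4 array, so edge_attr becomes
        # (182, 4), which matches ftr_dim=4 used when the GNN is instantiated.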
A=np.concatenate((np.expand_dims(TP, axis=2), np.expand_dims(TN, axis=2), + np.expand_dims(FP, axis=2), np.expand_dims(FN, axis=2)), axis=2) + + if A.ndim==2: + tmp_edge, edge_attr=compute_edge_attr(A) + else: + edge_lst=[] + edge_attr_lst=[] + for x in range(A.shape[2]): + tmp_edge, tmp_edge_attr=compute_edge_attr(np.squeeze(A[:,:,x])) + edge_lst.append(tmp_edge) + edge_attr_lst.append(tmp_edge_attr) + edge_attr=torch.cat(edge_attr_lst, dim=1) + return tmp_edge, edge_attr + +################ Compute weighted average of model weights ################## +def average_weights(w, cmb_wt, device): + + cmb_wt=np.array(cmb_wt) + cmb_wt=cmb_wt.astype(np.float) + cmb_wt=cmb_wt/np.sum(cmb_wt) + wts = torch.tensor(cmb_wt).to(device) + wts=wts.float() + w_avg = copy.deepcopy(w[0]) + + for key in w_avg.keys(): # for each layer + layer = key.split('.')[-1] + if layer == 'num_batches_tracked': + for i in range(1,len(w)): # for each model + w_avg[key] += w[i][key].to(device) + w_avg[key] = torch.div(w_avg[key].float(), torch.tensor(len(w)).float()).to(torch.int64) + else: + w_avg[key]=torch.mul(w_avg[key].to(device), wts[0].to(float)) + for i in range(1,len(w)): + w_avg[key] += torch.mul(w[i][key].to(device), wts[i].to(float)) + return w_avg + +def aggregate_local_weights(wt0, wt1, wt2, wt3, wt4, device): + + wt=average_weights([wt0, wt1, wt2, wt3, wt4], + [1.0, 2030.0/1997, 2093.0/1997, 1978.0/1997, 2122.0/1997],device) + return wt + +def compute_lcl_wt(epoch, cmb_wts, glbl_wt, prev_lcl_wt, device): + + cmb_wt=cmb_wts[epoch] + lcl_wt=1-cmb_wt + wt=average_weights([prev_lcl_wt, glbl_wt], [lcl_wt, cmb_wt],device) + return wt + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py new file mode 100644 index 00000000000..462720f40c5 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -0,0 +1,237 @@ +from torch import nn +import torch +from torch_geometric.nn import NNConv +from torch_geometric.nn import BatchNorm as GNN_BatchNorm +from .misc import compute_adjacency_matrix +from torch_geometric.data import DataLoader as DataLoader_GNN +from torch_geometric.data import Data as Data_GNN +import torch.nn.functional as F +from torchvision import models +# Define Architecture +''' Instead of creating an instance of the models within the constructor, + We will pass the initial layer for 1 to 3 channel, the backbone model + and the FC layers part separately as input arguments. 
+ + This will allow us to simply load the weights for each CNN model separately, + may be useful when updating only part of the network + ''' +class Fully_Connected_Layer(nn.Module): + def __init__(self, inp_dim, ftr_dim): + super(Fully_Connected_Layer, self).__init__() + + ftr_lyr=nn.ModuleList() + cls_lyr=nn.ModuleList() + + for cls in range(0,14): + ftr_lyr.append( + nn.Sequential( + nn.Linear(inp_dim, ftr_dim, bias=False), + nn.BatchNorm1d(ftr_dim), + nn.ReLU(), + nn.Linear(ftr_dim, ftr_dim, bias=False), + nn.BatchNorm1d(ftr_dim), + nn.ReLU() + ) + ) + + cls_lyr.append( + nn.Sequential( + nn.Linear(ftr_dim, 1, bias=False), + nn.BatchNorm1d(1) + ) + ) + self.ftr_lyr=ftr_lyr + self.cls_lyr=cls_lyr + + def forward(self, x): + prd_lst=[] + ftr_lst=[] + for cls in range(0,14): + + ftr=self.ftr_lyr[cls](x) + ftr_lst.append(torch.unsqueeze(ftr, dim=1)) + prd=self.cls_lyr[cls](ftr) + prd_lst.append(prd) + + + prd=torch.cat(prd_lst, axis=1) + return ftr_lst, prd + +############## Conv 1st layer ####################### + +class First_Conv(nn.Module): + def __init__(self): + super(First_Conv, self).__init__() + + # Convert 1 channel to 3 channel also can be made unique for each site + self.convert_channels=nn.Sequential(nn.Conv2d(1,3,1,1, bias=False), + nn.BatchNorm2d(3), + nn.ReLU() ) + def forward(self, x): + x=self.convert_channels(x) + return x + + +################## GNN Architecture Classes ############# + +# This MLP will map the edge weight to the weights used to avg. the features from the neighbors +class create_mlp(nn.Module): + def __init__(self, in_chnl, out): + super(create_mlp, self).__init__() + + self.lyr=nn.Sequential( + nn.Linear(in_chnl, out, bias=True), + #nn.BatchNorm1d(out), + nn.Tanh() + ) + + def forward(self, x): + out=self.lyr(x) + return out + +####################################################################################### + +# The Resdiual Block for the GNN +class Res_Graph_Conv_Lyr(nn.Module): + def __init__(self, in_chnls, base_chnls, mlp_model, aggr_md): + super(Res_Graph_Conv_Lyr, self).__init__() + + self.GNN_lyr=NNConv(in_chnls, base_chnls, mlp_model, aggr=aggr_md) + self.bn=GNN_BatchNorm(base_chnls) + + def forward(self, x, edge_index, edge_attr): + h=self.GNN_lyr(x, edge_index, edge_attr) + h=self.bn(h) + h=F.relu(h) + return x+h + +######################################################################################## + +############### The Graph Convolution Network ############################ +class GNN_Network(nn.Module): + def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): + super(GNN_Network, self).__init__() + + my_gcn=nn.ModuleList() + + # Base channels is actually the fraction of inp. 
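        # Illustrative note, not part of the original patch: with the values used elsewhere
        # in this patch (in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, ftr_dim=4) the
        # computation below keeps the node width at 512 through every residual NNConv layer,
        # and the final NNConv maps each of the 14 label nodes to a single logit, e.g.
        #   gnn = GNN_Network(in_chnls=512, base_chnls=1, grwth_rate=1,
        #                     depth=1, aggr_md='mean', ftr_dim=4)
        # expects Data objects whose x has shape (14, 512) and whose edge_attr has
        # shape (182, 4).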
+ in_chnls=int(in_chnls) + base_chnls=int(base_chnls*in_chnls) + + # A GCN to map input channels to base channels dimensions + my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, base_chnls, create_mlp(ftr_dim, in_chnls*base_chnls), aggr_md)) + + in_chnls=base_chnls + for k in range(0, depth): + out_chnls=int(in_chnls*grwth_rate) + # Get a GCN + in_chnls=max(in_chnls,1) + out_chnls=max(out_chnls,1) + my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, out_chnls, create_mlp(ftr_dim,in_chnls*out_chnls), aggr_md)) + in_chnls=out_chnls + #### Add the final classification layer that will convert output to 1D + my_gcn.append(NNConv(in_chnls, 1, create_mlp(ftr_dim, 1*in_chnls), aggr='mean')) + + self.my_gcn=my_gcn + self.dpth=depth + + + def forward(self, data): + + x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr + cnt=0 + x=self.my_gcn[cnt](x, edge_index, edge_attr) + for k in range(0, self.dpth): + cnt=cnt+1 + #print(cnt) + x=self.my_gcn[cnt](x, edge_index, edge_attr) + cnt=cnt+1 + out=self.my_gcn[cnt](x, edge_index, edge_attr) # num_nodes by 1 + return out + + +############## Models specifically defined for inference ############# + + +#####Gnn model for inference, works without gnn dataloader + +class GNN_Network_infer(nn.Module): + def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim, edge_index, edge_attr): + super(GNN_Network_infer, self).__init__() + + my_gcn=nn.ModuleList() + # Base channels is actually the fraction of inp. + in_chnls=int(in_chnls) + base_chnls=int(base_chnls*in_chnls) + + # A GCN to map input channels to base channels dimensions + my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, base_chnls, create_mlp(ftr_dim, in_chnls*base_chnls), aggr_md)) + + in_chnls=base_chnls + for k in range(0, depth): + out_chnls=int(in_chnls*grwth_rate) + # Get a GCN + in_chnls=max(in_chnls,1) + out_chnls=max(out_chnls,1) + my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, out_chnls, create_mlp(ftr_dim,in_chnls*out_chnls), aggr_md)) + in_chnls=out_chnls + #### Add the final classification layer that will convert output to 1D + my_gcn.append(NNConv(in_chnls, 1, create_mlp(ftr_dim, 1*in_chnls), aggr='mean')) + + self.my_gcn=my_gcn + self.dpth=depth + self.edge_index, self.edge_attr= edge_index, edge_attr + + def forward(self, x): + cnt=0 + x=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + for k in range(0, self.dpth): + cnt=cnt+1 + x=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + cnt=cnt+1 + out=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + return out + +########Combined model for inference and export########### +class Infer_model(nn.Module): + def __init__(self, backbone,split_path, gnn=True): + super(Infer_model, self).__init__() + self.gnn = gnn + if backbone=='densenet': + inp_dim=1024 + backbone_model=models.densenet121(pretrained=True) + backbone_model.classifier=nn.Identity() + elif backbone=='resnet': + inp_dim=512 + backbone_model=models.resnet18(weights='IMAGENET1K_V1') + backbone_model.fc=nn.Identity() + + elif backbone=='xception': + inp_dim=2048 + backbone_model=xception.xception(pretrained=True) + backbone_model.fc=nn.Identity() + + cnv_lyr=First_Conv() + fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim=512) + self.edge_index, self.edge_attr= compute_adjacency_matrix('confusion_matrix', -999, split_path) + if gnn is True: + gnn_model=GNN_Network_infer(in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, aggr_md='mean', ftr_dim=4,edge_index=self.edge_index, edge_attr=self.edge_attr) + self.cnv_lyr=cnv_lyr + self.backbone_model=backbone_model + 
self.fc_layers = fc_layers + if gnn is True: + self.gnn_model = gnn_model + self.DataLoader_GNN = DataLoader_GNN + self.Data_GNN = Data_GNN + + def forward(self, x): + img_3chnl=self.cnv_lyr(x) + gap_ftr=self.backbone_model(img_3chnl) + ftr_list, prd=self.fc_layers(gap_ftr) + ftr_list=torch.cat(ftr_list, dim=1) + ftr_list = ftr_list[0] + if self.gnn==True: + prd=self.gnn_model(x=ftr_list) + prd=prd.transpose(1,0) + return prd + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py new file mode 100644 index 00000000000..ee60a37361d --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -0,0 +1,659 @@ +import torch +from torch import nn +from .model import First_Conv, Fully_Connected_Layer, GNN_Network +from .dataloader import construct_dataset +from .loss import Custom_Loss +from torch.utils.data import DataLoader +from torchvision import models +from .misc import compute_adjacency_matrix +import copy +from torch_geometric.data import Data as Data_GNN +from torch_geometric.data import DataLoader as DataLoader_GNN +from .inference_utils import inference +from .transformations import train_transform, test_transform +from .misc import aggregate_local_weights, compute_lcl_wt + +# Train 1 batch update +def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, + trn_typ, criterion, device, edge_index=None, edge_attr=None): + ##Keep gnn_model and optim4 as None if training is to be done without GNN + img=sample['img'] + gt=sample['gt'] + + img=img.to(device) + gt=gt.to(device) + + ########Forward Pass ############## + img_3chnl=cnv_lyr(img) + gap_ftr=backbone_model(img_3chnl) + ftr_lst, prd=fc_layers(gap_ftr) + if gnn_model is not None: + ftr_lst=torch.cat(ftr_lst, dim=1) + + data_lst=[] + for k in range(0, ftr_lst.shape[0]): + data_lst.append(Data_GNN(x=ftr_lst[k,:,:], edge_index=edge_index, + edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) + loader = DataLoader_GNN(data_lst, batch_size=ftr_lst.shape[0]) + loader=next(iter(loader)).to(device) + gt=loader.y + + prd_final=gnn_model(loader) + else: + prd_final=prd + + loss=criterion(prd_final, gt) + + ####### Backward Pass ########## + ### Remove previous gradients + optim1.zero_grad() + optim2.zero_grad() + optim3.zero_grad() + if optim4 is not None: + optim4.zero_grad() + + + ### Compute Gradients + loss.backward() + + ### Optimizer Gradients + #update weights through backprop using Adam + + #if training is without gnn + if gnn_model is not None: + optim1.step() + optim2.step() + optim3.step() + return cnv_lyr, backbone_model, fc_layers, loss, optim1, optim2, optim3 + + #if training is with gnn + if trn_typ=='full': + optim1.step() + optim2.step() + + optim3.step() + optim4.step() + + + return cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4 + + +#### Train main +def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, + train_loader, trn_typ, n_batches, criterion, device, + edge_index=None, edge_attr=None): + + cnv_lyr.train() + backbone_model.train() + fc_layers.train() + + + ########## Optimizers and Schedulers ############# + #print(total_batches) + # lr=10**(-5) + + # optimizer + optim1 = torch.optim.Adam(cnv_lyr.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) + optim2 = torch.optim.Adam(backbone_model.parameters(), 
lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) + optim3 = torch.optim.Adam(fc_layers.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) + if gnn_model is not None: + gnn_model.train() + optim4 = torch.optim.Adam(gnn_model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) + + cnt=0 + trn_run_loss=0 + for i, sample in enumerate(train_loader): + cnt=cnt+1 + if gnn_model is not None: + cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4=train_one_batch( + sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, + trn_typ, criterion, device, edge_index, edge_attr) + else: + #Set gnn_model and optim4 as None if training is to be done without GNN + cnv_lyr, backbone_model, fc_layers, loss, optim1, optim2, optim3=train_one_batch( + sample, cnv_lyr, backbone_model, fc_layers, None, optim1, optim2, optim3, None, + trn_typ, criterion, device) + trn_run_loss=trn_run_loss+loss + + if (i+1) % 20== 0: # displays after every 20 batch updates + print ("cnt {}, Train Loss: {:.4f}".format(cnt,(trn_run_loss/(cnt))), end ="\r") + + ############# Monitor Validation Acc and Early Stopping ############ + if cnt>=n_batches: + break + if gnn_model is None: + return cnv_lyr, backbone_model, fc_layers + + return cnv_lyr, backbone_model, fc_layers, gnn_model + +####For training with gnn +def lcl_train_gnn(lr, trn_loader, val_loader, criterion, cnv_lyr, + backbone_model,fc_layers, gnn_model, edge_index, edge_attr, device): + + n_batches=1500 + ####### Freeze and train the part which is specific to each site only + print('Freeze global CNN, fine-tune GNN ...') + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, + fc_layers, gnn_model, trn_loader,'gnn', n_batches, criterion, device, + edge_index, edge_attr) + + ###### Compute the Validation accuracy ####### + print('Computing Validation Performance ...') + prev_val=inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, criterion, device, + edge_index, edge_attr) + + ######## Train the entire network in an end-to-end manner ### + print('Train end-to-end for Local Site ...') + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, + gnn_model, trn_loader,'full', 2*n_batches, criterion, device, + edge_index, edge_attr) + + cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) + backbone_wt=copy.deepcopy(backbone_model.state_dict()) + fc_wt=copy.deepcopy(fc_layers.state_dict()) + gnn_wt=copy.deepcopy(gnn_model.state_dict()) + + return prev_val, cnv_wt,backbone_wt, fc_wt, gnn_wt + +####For training without gnn +def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc_layers, device): + n_batches = 4000 + ###### Compute the Validation accuracy ####### + prev_val=inference(cnv_lyr1, backbone_model, fc_layers, None, val_loader, criterion, device) + + ######## Train the entire network in an end-to-end manner ### + train_end_to_end(lr, cnv_lyr1, backbone_model, fc_layers, None, trn_loader, None, n_batches, criterion, device) + + + cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + backbone_wt=copy.deepcopy(backbone_model.state_dict()) + fc_wt=copy.deepcopy(fc_layers.state_dict()) + return prev_val, cnv_lyr1_wt,backbone_wt, fc_wt + +def initialize_training(site, img_pth, split_npz, train_transform, test_transform, b_sz, device): + + data_trn=construct_dataset(img_pth, split_npz, site, train_transform, tn_vl_idx=0) + trn_loader=DataLoader(data_trn,b_sz, 
shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + + data_val=construct_dataset(img_pth, split_npz, site, test_transform, tn_vl_idx=1) + val_loader=DataLoader(data_val, 1, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + criterion=Custom_Loss(site, device) + edge_index, edge_attr= compute_adjacency_matrix('confusion_matrix', site, split_npz) + + + return trn_loader, val_loader, criterion, edge_index, edge_attr + + + +def initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model): + + cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) + backbone_wt=copy.deepcopy(backbone_model.state_dict()) + fc_wt=copy.deepcopy(fc_layers.state_dict()) + gnn_wt=copy.deepcopy(gnn_model.state_dict()) + + return cnv_wt, backbone_wt, fc_wt, gnn_wt + + + + +def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gnn_wt=None, sit1_gnn_wt=None, + sit2_gnn_wt=None,sit3_gnn_wt=None, sit4_gnn_wt=None): + torch.save({ + 'cnv_lyr_state_dict': glbl_cnv_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + 'sit0_gnn_model': sit0_gnn_wt, + 'sit1_gnn_model': sit1_gnn_wt, + 'sit2_gnn_model': sit2_gnn_wt, + 'sit3_gnn_model': sit3_gnn_wt, + 'sit4_gnn_model': sit4_gnn_wt, + }, mx_nm) + + +def instantiate_architecture(ftr_dim, model_name, gnn=False): + # If gnn=True, then instantiate the GNN architecture + + if model_name=='densenet': + inp_dim=1024 + backbone_model=models.densenet121(pretrained=True) + backbone_model.classifier=nn.Identity() + elif model_name=='resnet': + inp_dim=512 + backbone_model=models.resnet18(pretrained=True) + backbone_model.fc=nn.Identity() + + elif model_name=='xception': + inp_dim=2048 + backbone_model=xception.xception(pretrained=True) + backbone_model.fc=nn.Identity() + + + cnv_lyr=First_Conv() + fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim) + if gnn: + gnn_model=GNN_Network(in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, aggr_md='mean', ftr_dim=4) + return cnv_lyr, backbone_model, fc_layers, gnn_model + + return cnv_lyr, backbone_model, fc_layers + + +#Main function for training +def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transform, + max_epochs, backbone, device, restart_checkpoint='', savepoint=''): + + ###### Instantiate the CNN-GNN Architecture ############## + cnv_lyr, backbone_model, fc_layers, gnn_model=instantiate_architecture(ftr_dim=512, model_name=backbone, gnn=True) + cnv_lyr = cnv_lyr.to(device) + fc_layers = fc_layers.to(device) + backbone_model = backbone_model.to(device) + gnn_model = gnn_model.to(device) + + ##################################################################################### + ############## Initialize Data Loaders ################# + + trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader1, val_loader1, criterion1, edge_index1, edge_attr1=initialize_training(1, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader2, val_loader2, criterion2, edge_index2, edge_attr2=initialize_training(2, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader3, val_loader3, criterion3, edge_index3, edge_attr3=initialize_training(3, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, + train_transform, test_transform, b_sz, 
device=device) + + ######################################################################################### + ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone + #and identical model weights for the other layers. + + + glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, + fc_layers, gnn_model) + sit0_gnn_wt=copy.deepcopy(gnn_wt) + sit1_gnn_wt=copy.deepcopy(gnn_wt) + sit2_gnn_wt=copy.deepcopy(gnn_wt) + sit3_gnn_wt=copy.deepcopy(gnn_wt) + sit4_gnn_wt=copy.deepcopy(gnn_wt) + + del gnn_wt + # Load previous checkpoint if resuming the training else comment out + if restart_checkpoint!='': + checkpoint=torch.load(restart_checkpoint) + glbl_cnv_wt=checkpoint['cnv_lyr_state_dict'] + glbl_backbone_wt=checkpoint['backbone_model_state_dict'] + glbl_fc_wt=checkpoint['fc_layers_state_dict'] + sit0_gnn_wt=checkpoint['sit0_gnn_model'] + sit1_gnn_wt=checkpoint['sit1_gnn_model'] + sit2_gnn_wt=checkpoint['sit2_gnn_model'] + sit3_gnn_wt=checkpoint['sit3_gnn_model'] + sit4_gnn_wt=checkpoint['sit4_gnn_model'] + + ########################################################################################## + ################ Begin Actual Training ############ + max_val=0 + for epoch in range(0, max_epochs): + print('############ Epoch: '+str(epoch)+' #################') + + ###### Load the global model weights ######## + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit0_gnn_wt) + + print('\n \n SITE 0 \n') + prv_val0, sit0_cnv_wt,sit0_backbone_wt, sit0_fc_wt, sit0_gnn_wt=lcl_train_gnn(lr, trn_loader0, val_loader0, + criterion0, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index0, edge_attr0, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit1_gnn_wt) + + print('\n \n SITE 1 \n') + prv_val1, sit1_cnv_wt,sit1_backbone_wt, sit1_fc_wt, sit1_gnn_wt=lcl_train_gnn(lr, trn_loader1, val_loader1, + criterion1, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index1, edge_attr1, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit2_gnn_wt) + + print('\n \n SITE 2 \n') + prv_val2, sit2_cnv_wt,sit2_backbone_wt, sit2_fc_wt, sit2_gnn_wt=lcl_train_gnn(lr, trn_loader2, val_loader2, + criterion2, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index2, edge_attr2, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit3_gnn_wt) + + print('\n \n SITE 3 \n') + prv_val3, sit3_cnv_wt,sit3_backbone_wt, sit3_fc_wt, sit3_gnn_wt=lcl_train_gnn(lr, trn_loader3, val_loader3, + criterion3, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index3, edge_attr3, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit4_gnn_wt) + + print('\n \n SITE 4 \n') + prv_val4, sit4_cnv_wt,sit4_backbone_wt, sit4_fc_wt, sit4_gnn_wt=lcl_train_gnn(lr, trn_loader4, val_loader4, + criterion4, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index4, edge_attr4, device) + + + avg_val=(prv_val0+prv_val1+prv_val2+prv_val3+prv_val4)/5 + print('Avg Val AUC: '+str(avg_val)) + + if 
avg_val>max_val: + max_val=avg_val + mx_nm=savepoint+'best_weight_'+str(max_val)+'_'+str(epoch)+'.pt' + save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, + glbl_fc_wt, sit0_gnn_wt, sit1_gnn_wt, + sit2_gnn_wt, sit3_gnn_wt, sit4_gnn_wt) + print('Validation Performance Improved !') + + + ############### Compute the global model weights ############# + + glbl_cnv_wt=aggregate_local_weights(sit0_cnv_wt, sit1_cnv_wt, sit2_cnv_wt, + sit3_cnv_wt, sit4_cnv_wt, device) + + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, + sit3_backbone_wt, sit4_backbone_wt, device) + + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) + + +def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_transform, + test_transform, max_epochs, backbone, device, checkpoint='', savepath=''): + + cnv_lyr1, backbone_model, fc_layers = instantiate_architecture(ftr_dim=512, model_name=backbone) + cnv_lyr1 = cnv_lyr1.to(device) + backbone_model = backbone_model.to(device) + fc_layers = fc_layers.to(device) + if checkpoint!='': + checkpoint=torch.load(checkpoint) + ######The wights saved for model without gnn have cnv_lyr1_state_dict instead of cnv_lyr_state_dict.......but for trial weights for with gnn are used here + cnv_wt=checkpoint['cnv_lyr_state_dict'] + backbone_wt=checkpoint['backbone_model_state_dict'] + fc_wt=checkpoint['fc_layers_state_dict'] + cnv_lyr1.load_state_dict(cnv_wt) + backbone_model.load_state_dict(backbone_wt) + fc_layers.load_state_dict(fc_wt) + + ### Dataloaders and model weights for each site + # Site-0 + data_trn0=construct_dataset(img_pth, split_npz, site=0, transforms=train_transform, tn_vl_idx=0) + trn_loader0=DataLoader(data_trn0,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transform, tn_vl_idx=1) + val_loader0=DataLoader(data_val0, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-1 + data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transform, tn_vl_idx=0) + trn_loader1=DataLoader(data_trn1,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transform, tn_vl_idx=1) + val_loader1=DataLoader(data_val1, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-2 + data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transform, tn_vl_idx=0) + trn_loader2=DataLoader(data_trn2,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transform, tn_vl_idx=1) + val_loader2=DataLoader(data_val2, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-3 + data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transform, tn_vl_idx=0) + trn_loader3=DataLoader(data_trn3,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transform, tn_vl_idx=1) + val_loader3=DataLoader(data_val3, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + + # Site-4 + data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transform, tn_vl_idx=0) + trn_loader4=DataLoader(data_trn4,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + 
data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transform, tn_vl_idx=1) + val_loader4=DataLoader(data_val4, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + + criterion = Custom_Loss(site=-999,device=device) + + + ###### Initialize model weights with pre-trained weights + ## Global + glbl_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + glbl_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + glbl_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site0 + sit0_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit0_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit0_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 1 + sit1_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit1_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit1_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 2 + sit2_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit2_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit2_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 3 + sit3_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit3_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit3_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 4 + sit4_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit4_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit4_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + + + ###### Now begin training + max_val=0 + for epoch in range(0, max_epochs): + + + + ############ Perform the local trainings for each site ##### + ## Site 0 + print('\n \n SITE 0 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit0_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit0_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit0_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val0, sit0_cnv_lyr1_wt,sit0_backbone_wt, sit0_fc_wt=lcl_train(lr, trn_loader0, val_loader0, criterion, + cnv_lyr1, backbone_model,fc_layers, device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 1 + print('\n \n SITE 1 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit1_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit1_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit1_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val1, sit1_cnv_lyr1_wt,sit1_backbone_wt, sit1_fc_wt=lcl_train(lr, trn_loader1, val_loader1, criterion, + cnv_lyr1, backbone_model,fc_layers, device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 2 + print('\n \n SITE 2 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit2_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit2_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit2_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val2, sit2_cnv_lyr1_wt,sit2_backbone_wt, sit2_fc_wt=lcl_train(lr, trn_loader2, val_loader2, criterion, + cnv_lyr1, backbone_model,fc_layers, 
device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 3 + print('\n \n SITE 3 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit3_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit3_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit3_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val3, sit3_cnv_lyr1_wt,sit3_backbone_wt, sit3_fc_wt=lcl_train(lr, trn_loader3, val_loader3, criterion, + cnv_lyr1, backbone_model,fc_layers , device) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 4 + print('\n \n SITE 4 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit4_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit4_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val4, sit4_cnv_lyr1_wt,sit4_backbone_wt, sit4_fc_wt=lcl_train(lr, trn_loader4, val_loader4, criterion, + cnv_lyr1, backbone_model,fc_layers , device) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + avg_val=(prev_val0+prev_val1+prev_val2+prev_val3+prev_val4)/5 + + if avg_val>max_val: + max_val=avg_val + # save model weight, local weights + torch.save({ + 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + }, savepath+'best_glbl_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, + 'backbone_model_state_dict': sit0_backbone_wt, + 'fc_layers_state_dict': sit0_fc_wt, + }, savepath+'best_site0_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, + 'backbone_model_state_dict': sit1_backbone_wt, + 'fc_layers_state_dict': sit1_fc_wt, + }, savepath+'best_site1_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, + 'backbone_model_state_dict': sit2_backbone_wt, + 'fc_layers_state_dict': sit2_fc_wt, + }, savepath+'best_site2_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, + 'backbone_model_state_dict': sit3_backbone_wt, + 'fc_layers_state_dict': sit3_fc_wt, + }, savepath+'best_site3_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, + 'backbone_model_state_dict': sit4_backbone_wt, + 'fc_layers_state_dict': sit4_fc_wt, + }, savepath+'best_site4_weights.pth') + + ######### aggregate to compute global weight ############### + + glbl_cnv_lyr1_wt=aggregate_local_weights(sit0_cnv_lyr1_wt, sit1_cnv_lyr1_wt, sit2_cnv_lyr1_wt, + sit3_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) + + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, + sit3_backbone_wt, sit4_backbone_wt, device) + + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) + + + ###### Just before returning, save the final weights + + # save model weight, local weights + torch.save({ + 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + }, savepath+'final_glbl_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, + 'backbone_model_state_dict': sit0_backbone_wt, + 
'fc_layers_state_dict': sit0_fc_wt, + }, savepath+'final_site0_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, + 'backbone_model_state_dict': sit1_backbone_wt, + 'fc_layers_state_dict': sit1_fc_wt, + }, savepath+'final_site1_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, + 'backbone_model_state_dict': sit2_backbone_wt, + 'fc_layers_state_dict': sit2_fc_wt, + }, savepath+'final_site2_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, + 'backbone_model_state_dict': sit3_backbone_wt, + 'fc_layers_state_dict': sit3_fc_wt, + }, savepath+'final_site3_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, + 'backbone_model_state_dict': sit4_backbone_wt, + 'fc_layers_state_dict': sit4_fc_wt, + }, savepath+'final_site4_weights.pth') + + return + +def train_model(config): + if torch.cuda.is_available() and config['gpu']=='True': + device = torch.device('cuda') + else: + device = torch.device('cpu') + if config['gnn']=="True": + trainer_with_GNN(config['lr'],config['batch_size'], config['data'], config['split_npz'], + train_transform, test_transform, config['epochs'], config['backbone'], + device, config['checkpoint'], config['savepath'] ) + else: + avg_schedule = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + trainer_without_GNN(avg_schedule, config['lr'], config['batch_size'], config['data'], + config['split_npz'], train_transform, test_transform, config['epochs'], + config['backbone'], device, config['checkpoint'], config['savepath']) + + diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py new file mode 100644 index 00000000000..aac6d33b9c1 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py @@ -0,0 +1,18 @@ +from torchvision import transforms +################# Data Augmentation and Transforms ##################### + +# Training Transformations/ Data Augmentation +train_transform=transforms.Compose([ + transforms.Resize(350), + transforms.RandomResizedCrop(320, scale=(0.8, 1.0)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]) + ]) +# Test/Val Transformations +test_transform=transforms.Compose([ + transforms.Resize(320), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]) + ]) + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py new file mode 100644 index 00000000000..6d56b92ea13 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -0,0 +1,94 @@ +import unittest +import os +import sys +sys.path.append(r'/storage/adityak/federated/') +from src.utils.downloader import download_checkpoint +from src.utils.exporter import Exporter +from src.utils.get_config import get_config + +def create_export_test_with_gnn(): + class ExportTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='export',gnn=True) + if not os.path.exists(cls.config['checkpoint']): + download_checkpoint(gnn=True) + cls.model_path = cls.config['checkpoint'] + + def test_export_onnx(self): + self.exporter = Exporter(self.config, gnn=True) + self.exporter.export_model_onnx() + self.assertTrue(os.path.join(os.path.split(self.model_path)[ + 0], 
self.config.get('model_name_onnx'))) + + def test_export_ir(self): + self.exporter = Exporter(self.config, gnn=True) + model_dir = os.path.split(self.config['checkpoint'])[0] + if not os.path.exists(os.path.join(model_dir, self.config.get('model_name_onnx'))): + self.exporter.export_model_onnx() + self.exporter.export_model_ir() + name_xml = self.config['model_name'] + '.xml' + name_bin = self.config['model_name'] + '.bin' + xml_status = os.path.exists(os.path.join(model_dir, name_xml)) + bin_status = os.path.exists(os.path.join(model_dir, name_bin)) + self.assertTrue(xml_status) + self.assertTrue(bin_status) + + def test_config(self): + self.config = get_config(action='export', gnn=True) + self.model_path = self.config['checkpoint'] + self.input_shape = self.config['input_shape'] + self.output_dir = os.path.split(self.model_path)[0] + self.assertTrue(self.output_dir) + self.assertTrue(self.model_path) + self.assertListEqual(self.input_shape, [1, 1, 320, 320]) + return ExportTest + +def create_export_test_without_gnn(): + class ExportTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='export',gnn=False) + if not os.path.exists(cls.config['checkpoint']): + download_checkpoint(gnn=False) + cls.model_path = cls.config['checkpoint'] + + def test_export_onnx(self): + self.exporter = Exporter(self.config, gnn=False) + self.exporter.export_model_onnx() + self.assertTrue(os.path.join(os.path.split(self.model_path)[ + 0], self.config.get('model_name_onnx'))) + + def test_export_ir(self): + self.exporter = Exporter(self.config, gnn=False) + model_dir = os.path.split(self.config['checkpoint'])[0] + if not os.path.exists(os.path.join(model_dir, self.config.get('model_name_onnx'))): + self.exporter.export_model_onnx() + self.exporter.export_model_ir() + name_xml = self.config['model_name'] + '.xml' + name_bin = self.config['model_name'] + '.bin' + xml_status = os.path.exists(os.path.join(model_dir, name_xml)) + bin_status = os.path.exists(os.path.join(model_dir, name_bin)) + self.assertTrue(xml_status) + self.assertTrue(bin_status) + + def test_config(self): + self.config = get_config(action='export', gnn=False) + self.model_path = self.config['checkpoint'] + self.input_shape = self.config['input_shape'] + self.output_dir = os.path.split(self.model_path)[0] + self.assertTrue(self.output_dir) + self.assertTrue(self.model_path) + self.assertListEqual(self.input_shape, [1, 1, 320, 320]) + return ExportTest + + + +class TestInferenceEff(create_export_test_without_gnn()): + 'Test case with gnn' + +class TestInference(create_export_test_with_gnn()): + 'Test case with gnn' + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py new file mode 100644 index 00000000000..e7d19aae761 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -0,0 +1,73 @@ +import os +import numpy as np +import unittest +import torchvision +import sys +sys.path.append(r'/storage/adityak/federated/') +from src.utils.inference_utils import inference_model +from torch.utils.data import DataLoader +from src.utils.get_config import get_config +from src.utils.downloader import download_data + +def create_inference_test_with_gnn(): + class InferenceTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='inference', 
gnn=True) + + if not os.path.exists(cls.config['data']): + download_data(gnn=True) + + def test_pytorch_inference(self): + + config = get_config(action='inference', gnn=True) + inference_model(config,'pytorch') + + def test_onnx_inference(self): + + config = get_config(action='inference', gnn=True) + inference_model(config,'onnx') + + def test_ir_inference(self): + + config = get_config(action='inference', gnn=True) + inference_model(config,'ir') + + return InferenceTest + +def create_inference_test_without_gnn(): + class InferenceTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.config = get_config(action='inference', gnn=False) + + if not os.path.exists(cls.config['data']): + download_data(gnn=False) + + def test_pytorch_inference(self): + + config = get_config(action='inference', gnn=False) + inference_model(config,'pytorch') + + def test_onnx_inference(self): + + config = get_config(action='inference', gnn=False) + inference_model(config,'onnx') + + def test_ir_inference(self): + + config = get_config(action='inference', gnn=False) + inference_model(config,'ir') + + return InferenceTest + + +class TestTrainer(create_inference_test_without_gnn()): + 'Test case for without gnn' + +class TestTrainerEff(create_inference_test_with_gnn()): + 'Test case for with gnn' + +if __name__ == '__main__': + + unittest.main() \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py new file mode 100644 index 00000000000..1cfa3ec35c6 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py @@ -0,0 +1,59 @@ +import unittest +import os +import sys +sys.path.append(r'/storage/adityak/federated/') +from src.utils.train_utils import train_model +from src.utils.downloader import download_checkpoint, download_data +from src.utils.get_config import get_config +from src.utils.train_utils import train_model + +def create_train_test_for_without_gnn(): + class TrainerTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + config = get_config(action='train', gnn=False) + cls.config = config + if not os.path.exists(config["data"]): + download_data(gnn=True) + + def test_trainer(self): + if not os.path.exists(self.config["checkpoint"]): + download_checkpoint(gnn=False) + train_model(self.config) + + def test_config(self): + self.config = get_config(action='train', gnn=False) + + return TrainerTest + +def create_train_test_for_with_gnn(): + class TrainerTestEff(unittest.TestCase): + @classmethod + def setUpClass(cls): + config = get_config(action='train', gnn=True) + cls.config = config + if not os.path.exists(config["data"]): + download_data(gnn=True) + + def test_trainer(self): + if not os.path.exists(self.config["checkpoint"]): + download_checkpoint(gnn=True) + train_model(self.config) + + def test_config(self): + self.config = get_config(action='train', gnn=True) + + return TrainerTestEff + + +class TestTrainer(create_train_test_for_without_gnn()): + 'Test case for without gnn' + + +class TestTrainerEff(create_train_test_for_with_gnn()): + 'Test case for with gnn' + + +if __name__ == '__main__': + + unittest.main() \ No newline at end of file From 4975a5d4cb49f37849c73629f174828a27bc848f Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Sat, 11 Feb 2023 20:02:53 +0530 Subject: [PATCH 11/47] Correcting the documentary for arguments in training and inference script --- 
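The corrected --help text below maps one-to-one onto the config dictionary that main() in src/train.py builds before calling train_model(config). As a minimal sketch of the equivalent programmatic call (the two dataset paths are placeholders, and --gpu/--gnn are string flags that train_model() compares against 'True'):

```python
# Sketch only: mirrors the CLI defaults documented in this patch.
# The dataset and split paths are placeholders, not files shipped with the repository.
from src.utils.train_utils import train_model

config = {
    'data': '/path/to/chest_x_ray/',    # --data
    'split_npz': '/path/to/split.npz',  # --split_npz
    'batch_size': 15,                   # --batch_size default
    'epochs': 15,                       # --epochs default
    'gpu': 'False',                     # string flag, checked as == 'True' inside train_model
    'gnn': 'False',                     # string flag, selects the GNN or plain trainer
    'backbone': 'resnet',               # one of resnet/densenet/xception
    'lr': 1e-4,                         # --lr default
    'checkpoint': None,                 # as when --checkpoint is not passed on the CLI
    'savepath': 'model.pth',            # --savepath default
}
train_model(config)
```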
.../configs/fl_with_gnn.json | 2 - .../configs/fl_without_gnn.json | 2 - .../src/inference.py | 46 ++++++++++++------- .../src/train.py | 36 +++++++++------ .../src/utils/inference_utils.py | 13 +----- .../test/test_inference.py | 2 +- 6 files changed, 54 insertions(+), 47 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json index 9a761af296b..f7d926abf1b 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json @@ -5,7 +5,6 @@ "batch_size": 8, "epochs": 1, "gpu": "True", - "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", "lr": 1e-5, "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", "savepath": "/storage/adityak/wt_without_gnn/", @@ -16,7 +15,6 @@ "data": "/storage/adityak/chest_x_ray/", "split_npz": "/storage/adityak/split.npz", "batch_size": 1, - "epochs": 5, "gpu": "True", "gnn":"True", "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json index e26c05f9e7c..3f111eb72ad 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json @@ -5,7 +5,6 @@ "batch_size": 12, "epochs": 1, "gpu": "True", - "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", "lr": 1e-5, "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", "savepath": "/storage/adityak/wt_without_gnn/", @@ -16,7 +15,6 @@ "data": "/storage/adityak/chest_x_ray/", "split_npz": "/storage/adityak/split.npz", "batch_size": 1, - "epochs": 5, "gpu": "True", "gnn":"False", "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py index 241558c308b..152ff44e74a 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py @@ -3,14 +3,14 @@ def main(args): config = { - 'model_file': args.model_file, - 'data': args.dataset, + 'data': args.data, + 'split_npz': args.split_npz, 'gpu': args.gpu, + 'gnn': args.gnn, 'backbone': args.backbone, - 'split_npz': args.split_npz, + 'checkpoint': args.checkpoint, 'max_samples': args.max_samples, - 'gnn': args.gnn, - 'checkpoint': args.checkpoint + 'model_file': args.model_file } inference_model(config,run_type=args.run_type) @@ -18,22 +18,34 @@ def main(args): if __name__ == '__main__': parser = argparse.ArgumentParser(""" - The inference script works on a folder level. - Provide this script a folder (--data) with full-scale mammograms which can be created with the dataprep.py script. - It also requires a model (--model_file) file to work on which is to be produced by train.py script. - All the full-scale mammograms will be processed on by one to collect bpp, ssim and psnr information which will be averaged to produce the numbers to be plotted. + Inference script. 
+    Provide the folder path which contains the dataset.
+    Provide the path to the split.npz file.
+    (Optionally) You may choose to use GPU (--gpu).
+    (Optionally) You may choose to use GNN (--gnn).
+    (Optionally) You may choose to use resnet/densenet/xception for backbone. Default is resnet (--backbone).
+    (Optionally) You may choose to provide the path to model weights, to run inference using pytorch (--model_file).
+    (Optionally) You may choose to provide the path to the onnx or ir model, for running inference if run_type is onnx or ir (--checkpoint).
+    (Optionally) You may choose to provide the max no. of samples to run inference on (--max_samples). Default value is 10.
+    (Optionally) You may choose to provide the run type (--run_type). Default run type is pytorch.
    """)
    parser.add_argument('--data', type=str, required=True, help='Folder path of dataset')
    parser.add_argument('--split_npz', type=str, required=True,
-                        help='Folder path to the full-scale mammogram images (test)')
-    parser.add_argument('--gpu', help='Want GPU ?',type=str)
-    parser.add_argument('--model_file', type=str,
-                        required=True, help='Name of model and log files')
-    parser.add_argument('--max_samples', type=int, help='no. of max samples to infer on', default=10)
-    parser.add_argument('--gnn', action='store_true',type=str , help='using gnn or not?')
-    parser.add_argument('--backbone', type=str, required=False, default='resnet', help='backbone model')
-    parser.add_argument('--checkpoint', type=str, required=True, help='checkpoint to refer for onnx and ir models')
+                        help='path to the split.npz file')
+    parser.add_argument('--gpu', type=str, help='Want GPU ?', required=False, default='False')
+    parser.add_argument('--model_file', type=str, required=False,
+                        help='Path of model weights saved for running inference with pytorch')
+    parser.add_argument('--checkpoint', type=str,
+                        required=False, help='Path of onnx model file to load for inference. Required if run type is onnx or ir')
+    parser.add_argument('--gnn', type=str, help='using gnn or not?',
+                        required=False, default='False')
+    parser.add_argument('--backbone', type=str, required=False, default='resnet',
+                        help='backbone model to be used from resnet/densenet/xception, by default resnet is used')
+    parser.add_argument('--max_samples', type=int, required=False, default=10,
+                        help='Max no. of samples to run inference on')
+    parser.add_argument('--run_type', type=str, required=False, default='pytorch',
+                        help='Choose run type out of pytorch, onnx, ir. Default run type is pytorch')
    arguments = parser.parse_args()
    main(arguments)
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py
index cfa154e4784..ef4d939e778 100644
--- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py
+++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py
@@ -3,12 +3,15 @@ def main(args):
    config = {
        'data': args.data,
-        'split': args.split,
+        'split_npz': args.split_npz,
        'batch_size': args.batch_size,
        'epochs': args.epochs,
        'gpu': args.gpu,
-        'model_file_name': args.model_file_name,
        'gnn': args.gnn,
+        'backbone': args.backbone,
+        'lr': args.lr,
+        'checkpoint': args.checkpoint,
+        'savepath': args.savepath,
    }
    train_model(config)
@@ -17,27 +20,34 @@
    parser = argparse.ArgumentParser(
        """
        Training script.
-        Provide the folder paths which contains dataset and split.npz file.
-        Provide a filename prefix to help differentiate model/log from different runs with different configurations.
+        Provide the folder path which contains the dataset.
+        Provide the path to the split.npz file.
        (Optionally) You may choose to provide batch size (--batch_size), number of epochs (--epoch),
        (Optionally) You may choose to use GPU (--gpu).
        (Optionally) You may choose to use GNN (--gnn).
-        (optionally) You may choose to use resnet/densenet/xception for backbone. Default is resnet.
+        (Optionally) You may choose to use resnet/densenet/xception for backbone. Default is resnet (--backbone).
+        (Optionally) You may choose to provide a checkpoint, the path of model weights to resume training from (--checkpoint).
+        (Optionally) You may choose to provide the path to save model weights (--savepath).
+        (Optionally) You may choose to provide the learning rate (--lr).
        """
    )
    parser.add_argument('--data', type=str, required=True, help='Folder path of dataset')
-    parser.add_argument('--split', type=str, required=True,
-                        help='Folder path to the full-scale mammogram images (test)')
+    parser.add_argument('--split_npz', type=str, required=True,
+                        help='path to the split.npz file')
    parser.add_argument('-b', '--batch_size', type=int,
-                        required=False, default=32, help='Batch size')
+                        required=False, default=15, help='Batch size used for training and validation')
    parser.add_argument('-e', '--epochs', type=int, required=False,
                        default=15, help='Max number of epochs to run')
-    parser.add_argument('--gpu', action='store_true', help='Want GPU ?')
-    parser.add_argument('--model_file_name', type=str,
-                        required=True, help='Name of model and log files')
-    parser.add_argument('--gnn', action='store_true', help='using gnn or not?')
-    parser.add_argument('--backbone', type=str, required=False, default='resnet', help='backbone model')
+    parser.add_argument('--gpu', type=str, help='Want GPU ?', required=False, default='False')
+    parser.add_argument('--checkpoint', type=str,
+                        required=False, help='Path of model weights to load for resuming training')
+    parser.add_argument('--gnn', type=str, help='using gnn or not?',
+                        required=False, default='False')
+    parser.add_argument('--backbone', type=str, required=False, default='resnet',
+                        help='backbone model to be used from resnet/densenet/xception, by default resnet is used')
+    parser.add_argument('--lr', type=float, required=False, default=0.0001, help='learning rate')
+    parser.add_argument('--savepath', type=str, required=False, default='model.pth', help='path to save the model')
    arguments = parser.parse_args()
    main(arguments)
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py
index fc4ad0ebdfe..aa5678b3674 100644
--- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py
+++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py
@@ -103,7 +103,6 @@ def load_inference_model(config, run_type):
        glbl_cnv_wt=checkpoint['cnv_lyr_state_dict']
        glbl_backbone_wt=checkpoint['backbone_model_state_dict']
        glbl_fc_wt=checkpoint['fc_layers_state_dict']
-
        model.cnv_lyr.load_state_dict(glbl_cnv_wt)
        model.backbone_model.load_state_dict(glbl_backbone_wt)
        model.fc_layers.load_state_dict(glbl_fc_wt)
@@ -117,10 +116,8 @@ def load_inference_model(config, run_type):
                                              sit3_gnn_wt, sit4_gnn_wt, torch.device('cpu'))
            model.gnn_model.load_state_dict(glbl_gnn_wt)
        model.eval()
-
    elif run_type == 'onnx':
        model =
onnxruntime.InferenceSession(os.path.splitext(config['checkpoint'])[0] + ".onnx") - else: ie = IECore() split_text = os.path.splitext(config['checkpoint'])[0] @@ -141,12 +138,9 @@ def validate_model(model, config, run_type): data_test=construct_dataset(config['data'], config['split_npz'], -999, test_transform, tn_vl_idx=2) test_loader=DataLoader(data_test,batch_size=1, shuffle=False, num_workers=1, pin_memory=False) tot_loss=0 - tot_auc=0 - gt_lst=[] pred_lst=[] criterion = Custom_Loss(-999,device) - count = 0 with torch.no_grad(): for count, sample in enumerate(test_loader): @@ -193,19 +187,14 @@ def validate_model(model, config, run_type): gt_lst=np.concatenate(gt_lst, axis=1) pred_lst=np.concatenate(pred_lst, axis=1) - gt_lst=np.transpose(gt_lst) pred_lst=np.transpose(pred_lst) - # Now compute and display the average count=count+1 # since it began from 0 avg_loss=tot_loss/count - - # sens_lst, spec_lst, acc_lst, auc_lst=compute_performance(pred_lst, gt_lst) + # sens_lst, spec_lst, acc_lst, auc_lst are returned by compute_performance(pred_lst, gt_lst) _, _, _, auc_lst=compute_performance(pred_lst, gt_lst) avg_auc=np.mean(auc_lst) - - print ("\n Test_Loss: {:.4f}, Avg. AUC: {:.4f}".format(avg_loss, avg_auc)) def inference_model(config, run_type): diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py index e7d19aae761..f4c0fec3947 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -13,8 +13,8 @@ def create_inference_test_with_gnn(): class InferenceTest(unittest.TestCase): @classmethod def setUpClass(cls): + cls.config = get_config(action='inference', gnn=True) - if not os.path.exists(cls.config['data']): download_data(gnn=True) From 80a8fc2667e8e5772575f605869b50f7c994ed72 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 13 Feb 2023 14:38:44 +0530 Subject: [PATCH 12/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/test/test_export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py index 6d56b92ea13..d40d59bf2b3 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -85,7 +85,7 @@ def test_config(self): class TestInferenceEff(create_export_test_without_gnn()): - 'Test case with gnn' + 'Test case without gnn' class TestInference(create_export_test_with_gnn()): 'Test case with gnn' From d0e6c9abe8e2204c28481335f5a77afe4d9b1187 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 13 Feb 2023 14:39:36 +0530 Subject: [PATCH 13/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/test/test_inference.py | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py index f4c0fec3947..7f222652b9d 
100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -3,7 +3,6 @@ import unittest import torchvision import sys -sys.path.append(r'/storage/adityak/federated/') from src.utils.inference_utils import inference_model from torch.utils.data import DataLoader from src.utils.get_config import get_config From 8b847d80e2c0890ce8ed1791843a1311076173f1 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 13 Feb 2023 14:40:17 +0530 Subject: [PATCH 14/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/test/test_train.py | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py index 1cfa3ec35c6..db5753c0188 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py @@ -1,7 +1,6 @@ import unittest import os import sys -sys.path.append(r'/storage/adityak/federated/') from src.utils.train_utils import train_model from src.utils.downloader import download_checkpoint, download_data from src.utils.get_config import get_config From 41cdb38844c4e7508afdb240bb37b1ebdb713932 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:28:53 +0530 Subject: [PATCH 15/47] Update README.md Updating the directory --- .../README.md | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index c0109ab3715..250380a93af 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -36,9 +36,7 @@ The overall performance of the proposed CNN-GNN is ## Model -Download `.pth` checkpoint for CNN-GNN model trained on CheXpert dataset with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/high_low/weights.zip). - -Inference models will be made available in the [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public) as well. +Download `.pth` checkpoint for CNN-GNN model trained on CheXpert dataset with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/high_low/weights.zip). Note: The ONNX and IR representation models accepts inputs of fixed size mentioned in configuration file. This needs to be updated based on the input size. 
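The fixed [1, 1, 320, 320] input shape asserted in test/test_export.py is what gets baked into the exported graph. The repository's own Exporter class (src/utils/exporter.py) performs the real export; purely as an illustration of that step, a minimal sketch in which the model, output path, and opset are placeholders:

```python
# Illustrative sketch only, not the repository's Exporter:
# export a model with the fixed input shape that the ONNX/IR representations expect.
import torch

def export_fixed_shape(model, onnx_path, input_shape=(1, 1, 320, 320)):
    model.eval()
    dummy = torch.randn(*input_shape)  # fixed-size dummy input baked into the graph
    torch.onnx.export(
        model, dummy, onnx_path,
        export_params=True,
        opset_version=11,          # assumed; pick the opset your OpenVINO version supports
        input_names=['input'],
        output_names=['output'],
    )
```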
@@ -53,31 +51,32 @@ Note: The ONNX and IR representation models accepts inputs of fixed size mention ``` -radiology_compression/ +federated_chest_screening/ src/ utils/ - dataprep.py - downloader.py + dataloader.py downloader.py - evaluators.py exporter.py - generate.py get_config.py inference_utils.py + loss.py + metric.py + misc.py model.py train_utils.py + transformation.py export.py inference.py train.py configs/ - phase1_config.json - phase2_config.json - download_configs.json + download_configs.json + fl_with_gnn.json + fl_without_gnn.json media/ tests/ - test_export.py - test_inference.py - test_train.py + test_export.py + test_inference.py + test_train.py init_venv.sh README.md requirements.txt From 4976df1562339546046653eab964206231e455e3 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:30:25 +0530 Subject: [PATCH 16/47] Update init_venv.sh --- .../chest_xray_screening_federated_gcn/init_venv.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh index 1edc95f3985..bd822af71ea 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/init_venv.sh @@ -15,7 +15,7 @@ if [ -e venv ]; then echo "$ . venv/bin/activate" fi -virtualenv ${venv_dir} -p python3 --prompt="(compression)" +virtualenv ${venv_dir} -p python3 --prompt="(federated_gnn)" . ${venv_dir}/bin/activate From a6cd21799d5e309262bb5eff821099772c3f437b Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:31:26 +0530 Subject: [PATCH 17/47] Update setup.py --- .../chest_xray_screening_federated_gcn/setup.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py index 7764babd8c1..2a9a608e97e 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/setup.py @@ -1,5 +1,6 @@ from setuptools import setup, find_packages -setup(name='compression', +setup(name='federated_gnn', version='1.0', - packages=find_packages()) \ No newline at end of file + packages=find_packages()) + \ No newline at end of file From 1ae065cce432fbb18482ddb41fe9993dd613780c Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:32:36 +0530 Subject: [PATCH 18/47] Update inference_utils.py --- .../src/utils/inference_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py index aa5678b3674..b6d32e1b57b 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py @@ -199,4 +199,5 @@ def validate_model(model, config, run_type): def inference_model(config, run_type): model = load_inference_model(config, run_type) - validate_model(model, config, run_type) \ No newline at end of file + validate_model(model, config, run_type) + \ No newline at end of file From 
14e3e762d06d32d51af9b611f39cebead35e1e24 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:39:39 +0530 Subject: [PATCH 19/47] Update model.py --- .../src/utils/model.py | 41 +++++++------------ 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index 462720f40c5..22aad37c06d 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -24,22 +24,21 @@ def __init__(self, inp_dim, ftr_dim): for cls in range(0,14): ftr_lyr.append( - nn.Sequential( - nn.Linear(inp_dim, ftr_dim, bias=False), - nn.BatchNorm1d(ftr_dim), - nn.ReLU(), - nn.Linear(ftr_dim, ftr_dim, bias=False), - nn.BatchNorm1d(ftr_dim), - nn.ReLU() - ) - ) - + nn.Sequential( + nn.Linear(inp_dim, ftr_dim, bias=False), + nn.BatchNorm1d(ftr_dim), + nn.ReLU(), + nn.Linear(ftr_dim, ftr_dim, bias=False), + nn.BatchNorm1d(ftr_dim), + nn.ReLU() + ) + ) cls_lyr.append( - nn.Sequential( - nn.Linear(ftr_dim, 1, bias=False), - nn.BatchNorm1d(1) - ) - ) + nn.Sequential( + nn.Linear(ftr_dim, 1, bias=False), + nn.BatchNorm1d(1) + ) + ) self.ftr_lyr=ftr_lyr self.cls_lyr=cls_lyr @@ -47,18 +46,14 @@ def forward(self, x): prd_lst=[] ftr_lst=[] for cls in range(0,14): - ftr=self.ftr_lyr[cls](x) ftr_lst.append(torch.unsqueeze(ftr, dim=1)) prd=self.cls_lyr[cls](ftr) prd_lst.append(prd) - - prd=torch.cat(prd_lst, axis=1) return ftr_lst, prd ############## Conv 1st layer ####################### - class First_Conv(nn.Module): def __init__(self): super(First_Conv, self).__init__() @@ -71,9 +66,7 @@ def forward(self, x): x=self.convert_channels(x) return x - ################## GNN Architecture Classes ############# - # This MLP will map the edge weight to the weights used to avg. 
the features from the neighbors class create_mlp(nn.Module): def __init__(self, in_chnl, out): @@ -81,16 +74,13 @@ def __init__(self, in_chnl, out): self.lyr=nn.Sequential( nn.Linear(in_chnl, out, bias=True), - #nn.BatchNorm1d(out), nn.Tanh() ) - def forward(self, x): out=self.lyr(x) return out ####################################################################################### - # The Resdiual Block for the GNN class Res_Graph_Conv_Lyr(nn.Module): def __init__(self, in_chnls, base_chnls, mlp_model, aggr_md): @@ -135,7 +125,6 @@ def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): self.my_gcn=my_gcn self.dpth=depth - def forward(self, data): x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr @@ -149,10 +138,8 @@ def forward(self, data): out=self.my_gcn[cnt](x, edge_index, edge_attr) # num_nodes by 1 return out - ############## Models specifically defined for inference ############# - #####Gnn model for inference, works without gnn dataloader class GNN_Network_infer(nn.Module): From 00de0c269542f9c1ce962636bbcb61c4367c9124 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:40:55 +0530 Subject: [PATCH 20/47] Update transformations.py --- .../src/utils/transformations.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py index aac6d33b9c1..42dac7091b8 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py @@ -1,6 +1,4 @@ from torchvision import transforms -################# Data Augmentation and Transforms ##################### - # Training Transformations/ Data Augmentation train_transform=transforms.Compose([ transforms.Resize(350), From b498cc58e97494be91b2c81c518234c8a906302b Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 22:42:56 +0530 Subject: [PATCH 21/47] Update test_export.py --- .../chest_xray_screening_federated_gcn/test/test_export.py | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py index d40d59bf2b3..98939b0c21f 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -1,7 +1,6 @@ import unittest import os import sys -sys.path.append(r'/storage/adityak/federated/') from src.utils.downloader import download_checkpoint from src.utils.exporter import Exporter from src.utils.get_config import get_config From d5f8420cdf9ca3e9224a90d35a560a700ef0fa63 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 23:11:29 +0530 Subject: [PATCH 22/47] Updating loss.py Updating loss function to take weights from json file --- .../configs/loss_weights.json | 48 +++++++++++++++++ .../src/utils/loss.py | 51 ++----------------- 2 files changed, 52 insertions(+), 47 deletions(-) create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/loss_weights.json diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/loss_weights.json 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/loss_weights.json new file mode 100644 index 00000000000..41e9b5864cc --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/loss_weights.json @@ -0,0 +1,48 @@ +{ + "-999":{ + "wts_pos":[ 6.07201409, 12.57545272, 5.07639982, 1.29352719, 14.83679525, + 2.61834939, 9.25154963, 22.75312856, 4.12082252, 7.02592567, + 1.58836049, 38.86513797, 15.04438092, 1.17096019], + "wts_neg":[0.68019345, 0.64294624, 0.69547317, 1.16038896, 0.63797481, 0.79812282, + 0.6549775, 0.62857107, 0.71829276, 0.67000328, 0.99474773, 0.62145383, + 0.63759652, 1.2806393 ] + }, + "0":{ + "wts_pos":[636.94267516, 13.93728223, 5.28038864, 5.26537489, 87.87346221, 8.61623298, + 67.56756757, 228.83295195, 20.40816327, 79.42811755, 5.70450656, 276.24309392, + 87.79631255, 5.6840789 ], + "wts_neg":[3.14594016, 4.0373047, 7.68875903, 7.72081532, 3.24612089, 4.91690432, + 3.28256303, 3.17389786, 3.69767786, 3.2589213, 6.93769946, 3.16636059, + 3.24622626, 6.96815553] + }, + "1":{ + "wts_pos":[ 31.82686187, 649.35064935, 568.18181818, 11.06439478, 75.75757576, + 16.73920321, 11.19319454, 27.94076558, 25.4517689, 158.73015873, + 11.25999324, 387.59689922, 88.73114463, 7.74653343], + "wts_neg":[3.40901343, 3.09386795, 3.09597523, 4.26657565, 3.20965464, 3.77330013, + 4.24772747, 3.46056684, 3.50299506, 3.14011179, 4.23818606, 3.10385499, + 3.18989441, 5.11064547] + }, + "2":{ + "wts_pos":[653.59477124, 662.25165563, 584.79532164, 4.56350112, 45.12635379, 11.55401502, + 675.67567568, 746.26865672, 14.69723692, 29.20560748, 5.70418116, 159.23566879, + 87.03220191, 5.50721445], + "wts_neg":[3.00057011, 3.00039005, 3.0021916, 8.645284, 3.19856704, 4.02819738, 3.00012, + 2.99886043, 3.74868796, 3.3271227, 6.26998558, 3.04395471, 3.09300671, 6.52656311] + }, + "3":{ + "wts_pos":[359.71223022, 675.67567568, 515.46391753, 6.02772755, 65.40222368, 16.94053871, + 800.0, 740.74074074,19.9960008, 11.29433025, 10.53962901, 78.49293564, 87.2600349, + 7.8486775 ], + "wts_neg":[3.18775901, 3.17460317, 3.17924588, 6.64098818, 3.32016335, 3.88424937, 3.1722869, + 3.17329356, 3.75276767, 4.38711942, 4.51263538, 3.29228946, 3.27847354, 5.28904638] + }, + "4":{ + "wts_pos":[7.84990973, 308.64197531, 454.54545455, 9.28074246, 186.21973929, 16.51800463, + 819.67213115, 909.09090909, 27.52546105, 1515.15151515, 10.49538203, 1960.78431373, + 47.93863854, 4.16684029], + "wts_neg":[ 4.71720364, 2.97495091, 2.96577496, 4.31723007, 2.99392234, 3.58628604, + 2.95718003, 2.95613102, 3.29978551, 2.95229098, 4.09668169, 2.95098415, + 3.13952028, 10.06137438] + } +} \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py index 9b4e7512e58..5a661d2e75f 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py @@ -1,58 +1,15 @@ from torch import nn import torch import numpy as np - +from .get_config import get_config ############# Define the Weighted Loss. 
The weights are different for each class ######## class Custom_Loss(nn.Module): def __init__(self, site, device=torch.device('cpu')): super(Custom_Loss, self).__init__() - if site==-999: - wts_pos = np.array([ 6.07201409, 12.57545272, 5.07639982, 1.29352719, 14.83679525, - 2.61834939, 9.25154963, 22.75312856, 4.12082252, 7.02592567, - 1.58836049, 38.86513797, 15.04438092, 1.17096019]) - - wts_neg = np.array([0.68019345, 0.64294624, 0.69547317, 1.16038896, 0.63797481, 0.79812282, - 0.6549775, 0.62857107, 0.71829276, 0.67000328, 0.99474773, 0.62145383, - 0.63759652, 1.2806393 ]) - elif site==0: - wts_pos=np.array([636.94267516, 13.93728223, 5.28038864, 5.26537489, 87.87346221, 8.61623298, - 67.56756757, 228.83295195, 20.40816327, 79.42811755, 5.70450656, 276.24309392, - 87.79631255, 5.6840789 ]) - - wts_neg=np.array([3.14594016, 4.0373047, 7.68875903, 7.72081532, 3.24612089, 4.91690432, - 3.28256303, 3.17389786, 3.69767786, 3.2589213, 6.93769946, 3.16636059, - 3.24622626, 6.96815553]) - elif site==1: - wts_pos=np.array([ 31.82686187, 649.35064935, 568.18181818, 11.06439478, 75.75757576, - 16.73920321, 11.19319454, 27.94076558, 25.4517689, 158.73015873, - 11.25999324, 387.59689922, 88.73114463, 7.74653343]) - - wts_neg= np.array([3.40901343, 3.09386795, 3.09597523, 4.26657565, 3.20965464, 3.77330013, - 4.24772747, 3.46056684, 3.50299506, 3.14011179, 4.23818606, 3.10385499, - 3.18989441, 5.11064547]) - elif site==2: - wts_pos= np.array([653.59477124, 662.25165563, 584.79532164, 4.56350112, 45.12635379, 11.55401502, - 675.67567568, 746.26865672, 14.69723692, 29.20560748, 5.70418116, 159.23566879, - 87.03220191, 5.50721445]) - - wts_neg= np.array([3.00057011, 3.00039005, 3.0021916, 8.645284, 3.19856704, 4.02819738, 3.00012, - 2.99886043, 3.74868796, 3.3271227, 6.26998558, 3.04395471, 3.09300671, 6.52656311]) - elif site==3: - wts_pos=np.array([359.71223022, 675.67567568, 515.46391753, 6.02772755, 65.40222368, 16.94053871, - 800.0, 740.74074074,19.9960008, 11.29433025, 10.53962901, 78.49293564, 87.2600349, - 7.8486775 ]) - - wts_neg=np.array([3.18775901, 3.17460317, 3.17924588, 6.64098818, 3.32016335, 3.88424937, 3.1722869, - 3.17329356, 3.75276767, 4.38711942, 4.51263538, 3.29228946, 3.27847354, 5.28904638]) - elif site==4: - wts_pos=np.array([7.84990973, 308.64197531, 454.54545455, 9.28074246, 186.21973929, 16.51800463, - 819.67213115, 909.09090909, 27.52546105, 1515.15151515, 10.49538203, 1960.78431373, - 47.93863854, 4.16684029]) - - wts_neg=np.array([ 4.71720364, 2.97495091, 2.96577496, 4.31723007, 2.99392234, 3.58628604, - 2.95718003, 2.95613102, 3.29978551, 2.95229098, 4.09668169, 2.95098415, - 3.13952028, 10.06137438]) + config = get_config(action='loss') + wts_pos = np.array(config[str(site)]['wts_pos']) + wts_neg = np.array(config[str(site)]['wts_neg']) wts_pos = torch.from_numpy(wts_pos) wts_pos = wts_pos.type(torch.Tensor) wts_pos=wts_pos.to(device) # size 1 by cls From 449711dc5db65c77c3751be3814fd635caf79600 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 23:12:51 +0530 Subject: [PATCH 23/47] Update get_config.py --- .../chest_xray_screening_federated_gcn/src/utils/get_config.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py index c9d31e27ae3..40f85cbf953 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py +++ 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py @@ -10,6 +10,9 @@ def get_config(action, gnn=True, config_path=""): if action == 'download': with open(os.path.join(config_path, 'download_configs.json')) as f1: config = json.load(f1) + if action == 'loss': + with open(os.path.join(config_path, 'loss_weights.json')) as f1: + config = json.load(f1) else: if gnn is True: with open(os.path.join(config_path, 'fl_with_gnn.json')) as f1: From acddf37034690d7fc6c1344bd29ba8d6e63abdce Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 23:14:14 +0530 Subject: [PATCH 24/47] removing pycache --- .../__pycache__/dataloader.cpython-310.pyc | Bin 1528 -> 0 bytes .../__pycache__/downloader.cpython-310.pyc | Bin 1285 -> 0 bytes .../utils/__pycache__/exporter.cpython-310.pyc | Bin 2458 -> 0 bytes .../__pycache__/get_config.cpython-310.pyc | Bin 806 -> 0 bytes .../__pycache__/inference_utils.cpython-310.pyc | Bin 5082 -> 0 bytes .../src/utils/__pycache__/loss.cpython-310.pyc | Bin 2906 -> 0 bytes .../utils/__pycache__/metric.cpython-310.pyc | Bin 1029 -> 0 bytes .../src/utils/__pycache__/misc.cpython-310.pyc | Bin 3135 -> 0 bytes .../src/utils/__pycache__/misc.cpython-39.pyc | Bin 3138 -> 0 bytes .../src/utils/__pycache__/model.cpython-310.pyc | Bin 6160 -> 0 bytes .../src/utils/__pycache__/model.cpython-37.pyc | Bin 6431 -> 0 bytes .../src/utils/__pycache__/model.cpython-39.pyc | Bin 5565 -> 0 bytes .../__pycache__/train_utils.cpython-310.pyc | Bin 11837 -> 0 bytes .../__pycache__/transformations.cpython-310.pyc | Bin 476 -> 0 bytes 14 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/downloader.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/exporter.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/get_config.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/inference_utils.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/loss.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/metric.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/misc.cpython-39.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-37.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/train_utils.cpython-310.pyc delete mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/dataloader.cpython-310.pyc
deleted file mode 100644
[GIT binary patch data for the deleted __pycache__/*.pyc files omitted]
zjdokd$tiY*akMaRC#_29q}{%{)1rPmNF%NK)T_ZD={lphsB(V4E61CHqDxFeRVM?(hk^nOF9q0;GUuPwcE@H3?Pd1aztAa+ciEU)QpFuG19ESMbgmM9vJn0w$$FQd`vqXkc6U4|2#7Lz8dPh1wR^^A$c zaqft6M-bvNdC~h0A+(2=Tx{@0xZE%;!$k7f1C%6FL}SCsD@Yi%E7`Yt=FXK2BcNBb z9xo#itVr{mF^>1{cc7oo8?EM5K+-CY0a_a^r}W;hV@c}UY$%s9dAx>R%PrlGEpg5| z!HNVO8lqEBpXdC4jlnW6H0O~SHtiu$p3=Ti>DMiLh2ycQNu%|O1uD*JjCJH%|jef2}}xL64^ZH<>CG^ zuxM*cj^VxgJTUorqfYLe#H4i+gyjM41pL$z7Wd!b_Xo;_2SFaoj)CvOX-Pa81ZfPv z<=+h<}(MmoA8gXr;~hKdYoT*oRh)neiU0Hv?JaN&d64tts zc06k8@`r^8CGME&b!j3sv2g}txdk3Ja}IFo$}7H`EP2nLhWQau;vSY!>ZoHivdDJ1eK<4AMyJ77q;bxX>)}Fqv|-2dOU2dZl{=~U#RO``1fa@wa%B({e4cx=w4WBNusDXZ(^|a z#jA_@JnOk1fRv%ASk5Ma|g2)^>iynQEV$f z9lwR&((FeD=_;8 z2#Oo_nwsXXMVnoqwG(obnpE^=>MB|6ZYK@3MS625xpgW=ZL1%%q3#t=!rtuQzpkuq zCm}H%#9n>WSKlJngyy=}AzDi;IyZIEP1ndubnz%%rrkO-C(iVSj)-^|feZJIvS{}f cb9^0!X~DGZ^4AoT_2w(hW^)GjV)NDi0QJEjiU0rr diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/model.cpython-39.pyc deleted file mode 100644 index d5e5353b42d2d15592a4692620f29e968be63c3c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5565 zcmbVQ%ahwk8COfPUiLT>kMr6`2+KlYU^AO6kFabl%O*^AsaXR_;2=V&G-`Q9jx2jx za(2cgbD2LZHP^sc z=e5`zSgs}MMr;opw=Tyf#tnBxj#=CsFjZm7;FP;6DOP-X&~jT$xv6lQI}a4@gsi`I zpt)^O>bwC;qoSMvWra6EX;zf8pq%2XpsZGubD*5&El^q&w z-yHfp6u8sr4*$kr$ip}dNc!TXRX6GD7=((;xZ}Y>eZ0-^1#XFvvpgZTkD625SGS#Yo zJ*&~{4%8dUZzt1Qt{obfF*B`in_R*6jIN+Ve%wU2@rb$lR^+FGRIsyB)Y9=N6vD<`VR>GZ zM49ImYLXPrM?*f2!_6qoikk3~?XWQX(I`xK;oJ^)$6=C1eq0!vQ4;#Xtsg;pg%|p* zaPwD%*~^3%?~B3=;*#rxd6pu)0kAQ@f6q?x5RN zrYf42!MZC&cy|g|`YhKkC^*F>&JW(tzS1ovWqU&5GX;w8+u8bB>H%!yY?s_fw66tE4&$G+g}xJg^+l=6&m)@__|Lu<0h ztIJQcdh(G$Dwz0#5Gv?+9w3AQV%+e&-LW5+GZIkob$aDWr6WNY$DX&apqDW#XjM|N z*Dy#)-!0a7SyBSroVeze{vuAp*@#4%k<+*zkb%uF8k0RqPw@^0&w2_#Dg6UfgGno{ z!3P+;^abWjnUNJ+(t2xM3F>2#wjt{1JGN-xPkfs?(r!VPE$GNygI05uFIeMME=c=} z)c`Z0$h=_3hnI`f4wFy|c~(a-N`XJY1F4K)tf`$PBl%*ocD&o=7C_cOi@Z(`(~5HC z5PCjiQVFkZk+!bPxtfF|Kmn8PNmh4)r^BEE6*$z!zXNZy-2 z4zx{qNC7A(E?OpCFNAg=Lbw=j5RXp81(HREPygwg=$4}`y*?a)^!f%6R)Mf75!SkQ zN?ffHS2Ccda22G&u71Z)b_$IgsV0xmg&zf;NnB{dakdP}+q9|*Nfi!HHU2wUJKo81 zBxx(uuG7OOkVJU8t%%E@154U$wGbh(yaK{9K<|3R`BTerg0yD9w9jx=F!b6*6tQjF|rOhKPyr?lm#fh9=cg>}vzQn*FP&(#B@ z1fC&bRl5{x{q1c5rz#EhDuk54lC2cx&2CqQ#}yhJh0j9WN;oUM6$udy~37bKSNky zJImCk4>75Q4*P&daMa05OLp?5szj$xiZhP{hL}@G^wIVIs8XE(?1tILLvb%4XE49W z@~BB2`YQ%c>yS(uO=u3~1B^5%XKe*zH8+v$nWSTxl^gUAx|-JxaNbDsRB+oIr!G^> zjj2U4a!o#KXHG724)8d+P1(oPK9Xlw^6V4x>?zy*wdASOv&6&8wUo(f`BoF!-H^%U z$~-txw3894&lV0OMo2nV;pw=XY7m?G|xOz@m#<_d#xpQW2A`8^YBwr%YF-m{@BP>Ty z0wPbP%mT@#J67pkbdE~uDOOhtfX@a!^gMU|2TYa;o_ZE3UQ1s^LfzIa)>0>HiwE|l zCQ14+CTUf2_KF}18R>~@?weAI`}2y98?vT@c0RC?@1ia~QPY`4{llae!Z(tiyc5i2 zkYGyA2sde!|Hj~?Rmu`dgB7rQs<_5g_&FWv8kKB7F*6R4pyf5sN;be#1J7MqD4Uu138l9;4F}lYzg3cXTsJKB>Kc()- zFkQ+&d*c*Rs=_|H@Si77I_FDI@c}WDrzo`TBoTy;Tc5A+%GE0}#dWO@LCR1Ax{j<3 zqh1sW*DBX~<(jMZ0`W7FBvWI8St=Cj&*Z+0w9PF)xVJS-kjBaIY4-vzrcysGY`nLk zHjlqIc+ls7u0>ZtD-@r#54Eq4uRCl4kH3nE(_N<^~1TQP>)p!S!74n;hk=bYO2nag9SLVc$lG zKyJ&G-_wUI^gRu3Pw6{ob(F7=&y?LR?ix_%N(MI|*~(V3W^Nr&IE19bQ@Mpz*$GNy zgfJV9&P28>RqrdMCKAb@>S|q(cwGD zj*kx^GBDjD>X9_@OPbe{a4#$LD8t7R0&pId=*_Mj4Ys|vC=qwfZGYsUL>6BoA+OV{ zMQ)ZVY3WuABbHyXT$}3aUX=LpvZUwFcrvAen{sA{5+AjR2-Rkn)i$XG#*?*0$SY^t zVRxQi9+8#(kp$BLTC90V5T)~yYsoJHmtlEL5X2>PZcEakWYUgs6g5Ucu{ZsXWyNdE2A@QFM7(B(MUHf+miJ}Bh$>$qslT-)fn?PmGqcDD=Ex{jWg+ZE<;V992iISp!`uC|+<4c94G*zHQyX;rt% 
z&5FzG_t0k$56X1CwdQd2IVH^>ae1ZQDg(BA!-%-7S&_n^{sL;jej{2(Rjd|n3V9JKdUw75pniyj`Gw!3krg#&s z&Jtqmw!soJ%8qtjdF_z5M@;M{+@u&2dmvcK*GPl`*Yp`hWJL~Rq{K+CZw>1S#$(C* z#=EMjynfC##YAsRmcmSvZrZiPVpnv5aYa(3)>5x1BF#*2 zeE%ak%mGfL$1BXWGz2%j>g)Ap&DUx!*YK7~s;#qnlS@H1eD$7hISgV@Z!zbdpO*EC z>vCV+f>c?zDsFYX+^ugrzOe>5FOB$`+x8P~n^)I;y;^bo1f;Cy_=)a%rQ`U?N~hzr zm~WC?mZ1Z7dZX2SeZz4uM7}TJ)a4s3=bq~ub=Tp13X8MT+YWD+YrMj2^H!yL=Wc~F z-{`tdr!?ayyOn0A;rOX)YqQ+g;(ne;ZnaxZS*nVkUaOWHqg%4sF_cFRvPXR}^)_#P~ZPZAg-FfP}K?JO@>*INxcRjYK$ zYc97_0Ody4we=3i(iuHFP+z^{CmT|s?KFDIYxP#8;TsL;`}Y*Sg5f?`o`(q_O`NZ= zy1P}mGr#7*06-7fe3w_}H{5!oJ0B>ttj~3}dJ0aDF5eJsUiSuoshg^$St^xT)l~DU zp-rgIs$=*|qQ}rI%`{B($UnnO2DLHCfx2d@lR7_-p{k_Vy&vsC0sm6b-y}tcf=B_@ z!cq@PR}~2;O4ikG>$eS1CEk@rfld@CbHdAoJ=!BZ`LIWSqzCFJ)#+tngtG*r8;?*H zSi&2{{}{{y)&Vo|B^Z`*Z$gZ+boX0chh1N z{iA!8cQuw~)_n~oWK^W?B={exN=tqHrd+4O|0G!Bnhrw{u6E6UT4z>ETJ5j@ihloc zxpw;NlYjezAO7p*+Qn}z{KL1mo_nVD@9Xb<GFA^TObfAshxA%E>13~5+FH6y{8GBA#fJJ*71Ne za;wZYS~%&tU0<#9W7NM)fOIkd=TP$t!5Z+;Ww%{M@e^q6J_|q^hCgFAF{{o>ZJLFy z8EQe+HRzkAPJgH+NbklzH1&cu4IML7?V$<%%xaueshR=k{YFvA;a}?5U!uZPFXQ4M z-N9i6y;5)p$zwsNOcxm(6BfxUB+r19CcLDV0<|HjwTPCElYlM_t%M9dX0#+riOhr& zQN~BqcMWJAv>1nY&Lu1nVtI)rLu^E1sSqpB0UT)6S_1NFuGar90r@(5gVRY+tTo!V zDvjc`)zvda7)9r-yU}urI1`F%D#Sn)G_=ty=Mml|~&l6_&a9IfqNhfulpD z+eZRH#hW?nw$Gv{q8oAx52pAE%sCNpa#J42(=W*{qb~QJlYR4zY8&>{WRBC3HLL5A z3&iSGw=^nc)3-=k0x9&ZO~_X$kx~L5Be;Em4CM0)&0}`BvsnhWpQPjKuIp!_(+fxf zM;D4D9b0$ZVJXa!>lH{M2)=>wx}+wCM&=qOUd?`JCJHExho)gc1gRyT$fFN`8vh)K zt0qA2M`A{zCq-=?6^NSYh5`nYMnaR?NqOnM*r64n0tAkF5CXW`t`5O4q+nPQON3ZP zV#yH81}0TxrQy<6>$@XxLlScPgBL*u7k{3>3jn2DU=m)WhR+b7jrPs()L)F5390_H zz5FErJJTbfE&MTRUL`Q3(&=b3zl+{3xwD2kq0Q=fowh5h^3S8GngZzkh=#ULZkLM+ zwu-DVG=`~6!>N|SMx{i8&NxOec|R#PG@vrjN>sF+X-Uzi!q#D&B%H|%T-+4u+K!Gh zE9>R3**J?xlR_N|yf!%dZmYc6DA(D&o6>gQ_?}^>y6YQjYYoTFz}}WA>VWmNtq$UV zrqgWmEj!J4y8{OaZb|{s$l6913e}4w44%Yuae7KlkIpTCQXbA1l*mt!&(m?&{dBj} zsJrD>XPc7@aoQQ*xK-|MOIZ(HA1Dl`*B7FY`u>ZIX~)ou8LzkMZXK$=?F7OO)&FCR zB45!M+!*?|w1S$~`A?y}Y5??pv~a@ZE4q$~tL(yL?rKs#4T-5CmXMehVo8bVA(rYH zPARSGYDwoq;%Pqau;MRc%9zZ3me8T+n~pu7bXa>K)}Mp8OZ%PI0+q$-U<0kD0eU~G zM=E<5NbpEfReR@x$<%jD0iS}RYgk=vwPgBZp;napmd&4uY)`O|Sl2}4M8_HO5QBws z->D{iaR^@;!k35el|Ek5q}KrHLKs>~`-$ega;xcQYK>csa%dk55{VWH>uAZAz*1(S z1(7L@Rudw@QbHtHM~DQAc+|v>(Mp1dq4!$;2AkZS1~4>D9sD!i*V5(>c@v#Csyblr zM`9RRia!7Xqp8B{8)NV#UF9}TZg|0}XOM}6XKa*>))CNik zImb-Uq;97k87JwQJBpWZ(=gBokiDeP_-9>=-Juu>Cg>Kj9ld&u-*wl^6y5j;j|78mjk{B=O}zD5cL)uQ%(lL zPSsl-9Q4iLK#>ReNn$DycvN?xBbc3>g9>6_SjDQl!JWScarYT7MYdMh( zYgwdOf?mW>%!q6P+F0U#>eU2AFWx96SkMp2fQ*pM==V<`%9-@0ylM2O9@9VL&3b#i zecpcbn~&)~;2rde-XZTW`qPi;KjIzrj(Nwu6E1nK(l^{O@N_4=l6T5G?ZU$pr}1=W zqT1Q0HW$_AquRoor*V>6cXa-zZgMvz(%@Ybi!6hr(@B(BlqHlol&4VUQJz6Lg7PfN z0?Ij*qbTQ5j-gy&hPQ<14B9~nrnlACu6oOEnvEkRw7jDthMSmD$a}`S$cU4aLc$x! 
zh?1=3P&*}SdDPBO4ZM4#x1GhQPVwbv@7WRijSF)pgEzC{o%7CnPhfQ)KkC_VRHULX zI*mFZ_K3-7)F~u?f>AGcPkI-Qpr9jF=UBqfz&X{o$yWyr;cqyvx|R zj~{hD9CbPx^?*1iiqWWt#Nlw%XT2-lRqq|Vut0dHq7-s}tXW@$9>f>^p^ysh&0#0%b%u-LvS z1^r9%&F`1>Kne~Zs(;bVi5JC5Q4*)b8F5z3iFvWGla}xNpj=yCoD)xqr^OZV99ER{ zPP%z7T3GtM;EUt;?Wy>kFM5D=;&%FK@@+2>=@)nH5O}Hau%s4rr z(u##HVGwChh{K-)cpyz+htN#|Un1}-f!7H9G=VaKpCM2oa0_62;z;aQe6G{3u3s!} zPZW#ASbI?YAZrz^;wvxSe6F}qv|`xe0Jb!MEe~KTMe6}s=@5jm==+i3bWO4MJ?Gq$b>yb>r&+ZN@SR6sXWMpodn%-oN4)ynU;r` zuzz4$i8HM{!n6`F`KiigjW&ieY?&Rn=#NiR1j#aom+#i$O9VjM#hWG#={s5}}I zzJEj1vsUnxJNwhQi?yjYsu`x35n%G?4-o< zAyyioJT*XhdVuoG0Oi>M%DDl``2os>0m{VzO1QhRbuJH3t_)D(t~5q@et`0c0m=(3 zc&pah& zhGxF(o%QCt`Do^efteSEW?mecdFd0+ycaI{;4?q#EqIIGQZ#cAc=UFAd1&U9p_$Kp z;+YSMLqjuP@s=rwiDnLhlitkdhh~0aXyyx_c;+K;^9P^#stlYi1T(|2z8t#vS5RJs zn?D6N{~Da@C*juXd_r7Z2BF9qHJbLrpsjeDM;a&uzfbxF5UaEeQ&IN zK`e@;upPkyrQPH>%LqBnh3yC&qW1G}_b-I)2qI*A0h(}DEQnAilG^0)v*#aW3^<_jonT(5+msdV`(Xb4l<{3hx$nBjM+M!`&d=0^`7 zVsPRMBA$Cx+aj<{;0*$TfCo_0?eQ@Axfu8Dqd;hTkBl^AS~2_%29d`4_M~ivi{vcy zl7U0*iyzUx^bzgLAJM)t*e<^hC_l2Esi6JQ1;siBR}||QTvDuKa80p}fkojKjIvfi zP(=Y0e~ZAE3Bb{bUm4sWG`h#>hgh6$Hs!4fuTo7$RM-71IhI|x;B2Ge{Lk?f{|fQ# z5Lh7aZ2;etK~%+E=RZzuCkV&{-e4{s_a5{)2EvL3{#DR~I6T?~{OsC@cY2!^_CrEyCZ2uf_jbm zD2TBOc<~F;S;to|c+W2XMe6&dUf<$C-!D_&uk`wYaH7ZetJL?MUSAM3^!k2{`hLCF z7X$~rzTcp}-z4x01l}g95x3ciPaHJF;uSkh6hXLxuU~@e&jtDrCa5m`!1WP=1~~AKFT?(<9$uTu zyTIT^@KIE=(K@YDK+?O2F#aoP7(X(S!KK8f!%VXu z-c0KMH)9Tu!haXCaH1yAisABjbQgjngU3(mjuR3mv0a*Lm*T0coiSBb$vE8hqyjNHK3 z#>-c54jsOE30s3h^BGd9?o|L&q3`|ZmZ|qgIQ>zoL`z!_wJX*`bJ5&y=FNhZ)l}=D SmACdOZ&_*OE#<9j=Dz_z+27{? diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__pycache__/transformations.cpython-310.pyc deleted file mode 100644 index c166ef2fdbf365a3db114991e6681128de528b67..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 476 zcmZuty-ve05VrHzG_*xXh=DiA(u^#KR;q-UI+RWsKvwJqLlQ@}i_|^>5)V)&I`T@M zkYMS?3_{}EqSAr0?$h1(`F?ECZbt|#et6FoE<$fM*)^;7!Bqu75yb^6af&fw_}Kw> zsk6ik-+I)g9`zqB=P#`qtg%6Z99v&Hxxd6|xUF+--9NX0Pn#59qPVewnP|wbFhaZ! z5IgzP!rH2rTM40$UW|>eGE%T#=Cv0`=12>f-Q07Wc zF~#T19E?asMY+l7K#JNNZm0_(`CL?*6z2u6O*j@~Rw*G(a|mro0oCa~wC2_Ic7W+< zru1L_f@BL8Uc=`}qF_UkvxHEtXXGxKFv=v+j3!EE$yD<~CEH#^!#h;Bo|*8p6m(j! 
bGx!apjsaT8^FrLh0h|sFzP!C~9YxCi From 9070926dd13c46e40f64a68af4de32f876f4ac34 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 23:24:48 +0530 Subject: [PATCH 25/47] Update train_utils.py --- .../src/utils/train_utils.py | 33 ++----------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index ee60a37361d..4bb41f6b181 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -11,7 +11,7 @@ from torch_geometric.data import DataLoader as DataLoader_GNN from .inference_utils import inference from .transformations import train_transform, test_transform -from .misc import aggregate_local_weights, compute_lcl_wt +from .misc import aggregate_local_weights, compute_lcl_wt, save_model_weights # Train 1 batch update def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, @@ -51,14 +51,11 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim optim3.zero_grad() if optim4 is not None: optim4.zero_grad() - - + ### Compute Gradients loss.backward() ### Optimizer Gradients - #update weights through backprop using Adam - #if training is without gnn if gnn_model is not None: optim1.step() @@ -73,8 +70,6 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim optim3.step() optim4.step() - - return cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4 @@ -87,10 +82,7 @@ def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, backbone_model.train() fc_layers.train() - ########## Optimizers and Schedulers ############# - #print(total_batches) - # lr=10**(-5) # optimizer optim1 = torch.optim.Adam(cnv_lyr.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) @@ -195,23 +187,6 @@ def initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model): return cnv_wt, backbone_wt, fc_wt, gnn_wt - - - -def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gnn_wt=None, sit1_gnn_wt=None, - sit2_gnn_wt=None,sit3_gnn_wt=None, sit4_gnn_wt=None): - torch.save({ - 'cnv_lyr_state_dict': glbl_cnv_wt, - 'backbone_model_state_dict': glbl_backbone_wt, - 'fc_layers_state_dict': glbl_fc_wt, - 'sit0_gnn_model': sit0_gnn_wt, - 'sit1_gnn_model': sit1_gnn_wt, - 'sit2_gnn_model': sit2_gnn_wt, - 'sit3_gnn_model': sit3_gnn_wt, - 'sit4_gnn_model': sit4_gnn_wt, - }, mx_nm) - - def instantiate_architecture(ftr_dim, model_name, gnn=False): # If gnn=True, then instantiate the GNN architecture @@ -459,14 +434,10 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans sit4_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit4_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ###### Now begin training max_val=0 for epoch in range(0, max_epochs): - - ############ Perform the local trainings for each site ##### ## Site 0 print('\n \n SITE 0 \n') From 10a9f163ae3f240baa9eba579f8f71c84d90c595 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Mon, 20 Feb 2023 23:25:03 +0530 Subject: [PATCH 26/47] Update misc.py --- .../src/utils/misc.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) 
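Note: patches 25/47 and 26/47 together relocate save_model_weights from train_utils.py into misc.py; train_utils.py now pulls it back in through the updated "from .misc import ..." line above. A minimal, illustrative sketch of calling the relocated helper, assuming the repository is importable as a package and using stand-in modules in place of the real layers (the helper's signature matches the one added to misc.py in the hunk below, with the per-site GNN arguments defaulting to None):

    import copy
    import torch.nn as nn
    from src.utils.misc import save_model_weights  # assumed import path for the relocated helper

    # Stand-in modules; during training these are the shared conv layer, backbone and FC head.
    cnv_lyr, backbone_model, fc_layers = nn.Conv2d(1, 3, 3), nn.Linear(8, 8), nn.Linear(8, 14)
    glbl_cnv_wt = copy.deepcopy(cnv_lyr.state_dict())
    glbl_backbone_wt = copy.deepcopy(backbone_model.state_dict())
    glbl_fc_wt = copy.deepcopy(fc_layers.state_dict())

    # Writes one checkpoint dict via torch.save; site GNN weights are optional keyword arguments.
    save_model_weights('model_weights/with_gnn/best_weight.pt',  # hypothetical output path
                       glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt)
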
diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py index 223d44abda7..f853ec9010c 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py @@ -120,4 +120,16 @@ def compute_lcl_wt(epoch, cmb_wts, glbl_wt, prev_lcl_wt, device): lcl_wt=1-cmb_wt wt=average_weights([prev_lcl_wt, glbl_wt], [lcl_wt, cmb_wt],device) return wt - \ No newline at end of file + +def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gnn_wt=None, sit1_gnn_wt=None, + sit2_gnn_wt=None,sit3_gnn_wt=None, sit4_gnn_wt=None): + torch.save({ + 'cnv_lyr_state_dict': glbl_cnv_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + 'sit0_gnn_model': sit0_gnn_wt, + 'sit1_gnn_model': sit1_gnn_wt, + 'sit2_gnn_model': sit2_gnn_wt, + 'sit3_gnn_model': sit3_gnn_wt, + 'sit4_gnn_model': sit4_gnn_wt, + }, mx_nm) \ No newline at end of file From df3be4d83e462a0da7523617b05871a39369261e Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 01:03:45 +0530 Subject: [PATCH 27/47] Update get_config.py --- .../chest_xray_screening_federated_gcn/src/utils/get_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py index 40f85cbf953..96dc238fd99 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/get_config.py @@ -10,7 +10,7 @@ def get_config(action, gnn=True, config_path=""): if action == 'download': with open(os.path.join(config_path, 'download_configs.json')) as f1: config = json.load(f1) - if action == 'loss': + elif action == 'loss': with open(os.path.join(config_path, 'loss_weights.json')) as f1: config = json.load(f1) else: From 117b408584f74e9dde4cbbeb9890f2a7ee4d578f Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 10:13:40 +0530 Subject: [PATCH 28/47] pylint fixes --- .../src/utils/misc.py | 2 +- .../src/utils/model.py | 18 ++++++++++-------- .../src/utils/train_utils.py | 2 +- .../test/test_export.py | 6 +++--- .../test/test_inference.py | 6 +++--- .../test/test_train.py | 8 ++++---- 6 files changed, 22 insertions(+), 20 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py index f853ec9010c..62dd3325d1a 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py @@ -90,7 +90,7 @@ def compute_adjacency_matrix(adj_type, site, split_npz='/storage/aneesh/split.np def average_weights(w, cmb_wt, device): cmb_wt=np.array(cmb_wt) - cmb_wt=cmb_wt.astype(np.float) + cmb_wt=cmb_wt.astype(float) cmb_wt=cmb_wt/np.sum(cmb_wt) wts = torch.tensor(cmb_wt).to(device) wts=wts.float() diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index 22aad37c06d..a408aaf3045 100644 --- 
a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -143,7 +143,7 @@ def forward(self, data): #####Gnn model for inference, works without gnn dataloader class GNN_Network_infer(nn.Module): - def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim, edge_index, edge_attr): + def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): super(GNN_Network_infer, self).__init__() my_gcn=nn.ModuleList() @@ -167,16 +167,15 @@ def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim, ed self.my_gcn=my_gcn self.dpth=depth - self.edge_index, self.edge_attr= edge_index, edge_attr - def forward(self, x): + def forward(self, x, edge_index, edge_attr): cnt=0 - x=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + x=self.my_gcn[cnt](x, edge_index, edge_attr) for k in range(0, self.dpth): cnt=cnt+1 - x=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + x=self.my_gcn[cnt](x, edge_index, edge_attr) cnt=cnt+1 - out=self.my_gcn[cnt](x, self.edge_index, self.edge_attr) + out=self.my_gcn[cnt](x, edge_index, edge_attr) return out ########Combined model for inference and export########### @@ -202,7 +201,7 @@ def __init__(self, backbone,split_path, gnn=True): fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim=512) self.edge_index, self.edge_attr= compute_adjacency_matrix('confusion_matrix', -999, split_path) if gnn is True: - gnn_model=GNN_Network_infer(in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, aggr_md='mean', ftr_dim=4,edge_index=self.edge_index, edge_attr=self.edge_attr) + gnn_model=GNN_Network_infer(in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, aggr_md='mean', ftr_dim=4) self.cnv_lyr=cnv_lyr self.backbone_model=backbone_model self.fc_layers = fc_layers @@ -218,7 +217,10 @@ def forward(self, x): ftr_list=torch.cat(ftr_list, dim=1) ftr_list = ftr_list[0] if self.gnn==True: - prd=self.gnn_model(x=ftr_list) + if x.is_cuda: + self.edge_attr=self.edge_attr.cuda() + self.edge_index=self.edge_index.cuda() + prd=self.gnn_model(ftr_list, self.edge_index, self.edge_attr) prd=prd.transpose(1,0) return prd \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 4bb41f6b181..568e4d04b29 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -57,7 +57,7 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim ### Optimizer Gradients #if training is without gnn - if gnn_model is not None: + if gnn_model is None: optim1.step() optim2.step() optim3.step() diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py index 98939b0c21f..22e94417686 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -1,9 +1,9 @@ import unittest import os import sys -from src.utils.downloader import download_checkpoint -from src.utils.exporter import Exporter -from src.utils.get_config import get_config +from ..src.utils.downloader import download_checkpoint +from ..src.utils.exporter import Exporter +from ..src.utils.get_config import 
get_config def create_export_test_with_gnn(): class ExportTest(unittest.TestCase): diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py index 7f222652b9d..b07547d8a7a 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -3,10 +3,10 @@ import unittest import torchvision import sys -from src.utils.inference_utils import inference_model +from ..src.utils.inference_utils import inference_model from torch.utils.data import DataLoader -from src.utils.get_config import get_config -from src.utils.downloader import download_data +from ..src.utils.get_config import get_config +from ..src.utils.downloader import download_data def create_inference_test_with_gnn(): class InferenceTest(unittest.TestCase): diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py index db5753c0188..21304c6e09d 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py @@ -1,10 +1,10 @@ import unittest import os import sys -from src.utils.train_utils import train_model -from src.utils.downloader import download_checkpoint, download_data -from src.utils.get_config import get_config -from src.utils.train_utils import train_model +from ..src.utils.train_utils import train_model +from ..src.utils.downloader import download_checkpoint, download_data +from ..src.utils.get_config import get_config +from ..src.utils.train_utils import train_model def create_train_test_for_without_gnn(): class TrainerTest(unittest.TestCase): From bac5bafd3ebe5ecdd054c2e7bb61b8d146e74f57 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 11:01:44 +0530 Subject: [PATCH 29/47] Update README.md --- .../pytorch_toolkit/chest_xray_screening_federated_gcn/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index 250380a93af..316ca56dda1 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -72,6 +72,7 @@ federated_chest_screening/ download_configs.json fl_with_gnn.json fl_without_gnn.json + loss_weights.json media/ tests/ test_export.py From fab6f10919a10555e60115042476bf0141699c75 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 11:19:11 +0530 Subject: [PATCH 30/47] adding relative paths --- .../configs/download_configs.json | 6 ++--- .../configs/fl_with_gnn.json | 24 +++++++++---------- .../configs/fl_without_gnn.json | 24 +++++++++---------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json index de5f3f223d0..8d4a021df59 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json @@ -2,15 +2,15 @@ "data":{ 
"dest_path_data": "dataset/data/data.zip", "url_data":"", - "url_split": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase1/phase1.zip", + "url_split": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/model.zip", "dest_path_split": "dataset/split.zip" }, "fl_with_gnn":{ - "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/cbis_a1_b1.zip", + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model.zip", "dest_path_model": "model_weights/with_gnn/checkpoint.zip" }, "fl_without_gnn":{ - "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi34/cbis_a1_b1.zip", + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model.zip", "dest_path_model": "model_weights/without_gnn/checkpoint.zip" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json index f7d926abf1b..a8601d93ba7 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json @@ -1,38 +1,38 @@ { "train": { - "data": "/storage/adityak/chest_x_ray/", - "split_npz": "/storage/adityak/split.npz", + "data": "dataset/data/", + "split_npz": "dataset/split.npz", "batch_size": 8, "epochs": 1, "gpu": "True", "lr": 1e-5, - "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", - "savepath": "/storage/adityak/wt_without_gnn/", + "checkpoint": "model_weights/with_gnn/checkpoint.zip", + "savepath": "model_weights/with_gnn/", "backbone":"resnet", "gnn":"True" }, "inference": { - "data": "/storage/adityak/chest_x_ray/", - "split_npz": "/storage/adityak/split.npz", + "data": "dataset/data/", + "split_npz": "dataset/split.npz", "batch_size": 1, "gpu": "True", "gnn":"True", - "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", - "checkpoint": "/storage/adityak/wt_without_gnn/modeltry.pt", + "model_file": "model_weights/with_gnn/checkpoint.zip", + "checkpoint": "model_weights/with_gnn/model.onnx", "backbone":"resnet", "max_samples":10 }, "export": { - "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "checkpoint": "model_weights/with_gnn/checkpoint.zip", "backbone":"resnet", - "split_path":"/storage/adityak/split.npz", + "split_path":"dataset/split.npz", "input_shape": [ 1, 1, 320, 320 ], - "model_name_onnx": "modeltry.onnx", - "model_name": "modeltry" + "model_name_onnx": "model.onnx", + "model_name": "model" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json index 3f111eb72ad..95f51a57ba1 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json @@ -1,38 +1,38 @@ { "train": { - "data": "/storage/adityak/chest_x_ray/", - "split_npz": "/storage/adityak/split.npz", + "data": "dataset/data/", + "split_npz": "dataset/split.npz", "batch_size": 12, "epochs": 1, "gpu": "True", "lr": 1e-5, - "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", - "savepath": "/storage/adityak/wt_without_gnn/", + "checkpoint": "model_weights/without_gnn/checkpoint.zip", + "savepath": 
"model_weights/without_gnn/", "backbone":"resnet", "gnn":"False" }, "inference": { - "data": "/storage/adityak/chest_x_ray/", - "split_npz": "/storage/adityak/split.npz", + "data": "dataset/data/", + "split_npz": "dataset/split.npz", "batch_size": 1, "gpu": "True", "gnn":"False", - "model_file": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", - "checkpoint": "/storage/adityak/wt_without_gnn/modeltry.pt", + "model_file": "model_weights/without_gnn/checkpoint.zip", + "checkpoint": "model_weights/without_gnn/model.pt", "backbone":"resnet", "max_samples":10 }, "export": { - "checkpoint": "/storage/adityak/wt_without_gnn/best_weight_0.65908386103622_19.pt", + "checkpoint": "model_weights/without_gnn/checkpoint.zip", "backbone":"resnet", - "split_path":"/storage/adityak/split.npz", + "split_path":"dataset/split.npz", "input_shape": [ 1, 1, 320, 320 ], - "model_name_onnx": "modeltry.onnx", - "model_name": "modeltry" + "model_name_onnx": "model.onnx", + "model_name": "model" } } \ No newline at end of file From 5030e9f982987fa2b34eefd66f9e106cfb8a3971 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:04:05 +0530 Subject: [PATCH 31/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py Co-authored-by: Rakshith Sathish --- .../src/utils/inference_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py index b6d32e1b57b..b139a5a6adf 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py @@ -40,7 +40,7 @@ def inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, gt=sample['gt'] img=img.to(device) gt=gt.to(device) - ############################################################## + img_3chnl=cnv_lyr(img) gap_ftr=backbone_model(img_3chnl) ftr_lst, prd=fc_layers(gap_ftr) From efbb424d0abf99dc42ab76778d98dc70c4e86536 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:04:27 +0530 Subject: [PATCH 32/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/src/utils/model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index a408aaf3045..c9c05310dda 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -80,7 +80,6 @@ def forward(self, x): out=self.lyr(x) return out -####################################################################################### # The Resdiual Block for the GNN class Res_Graph_Conv_Lyr(nn.Module): def __init__(self, in_chnls, base_chnls, mlp_model, aggr_md): From 18b8fbecc60e320f97b63315aab06b50b60d9b89 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:04:49 +0530 Subject: [PATCH 33/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py Co-authored-by: Rakshith Sathish --- 
.../chest_xray_screening_federated_gcn/src/utils/model.py | 1 - 1 file changed, 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index c9c05310dda..b5280f82195 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -94,7 +94,6 @@ def forward(self, x, edge_index, edge_attr): h=F.relu(h) return x+h -######################################################################################## ############### The Graph Convolution Network ############################ class GNN_Network(nn.Module): From e0a5b29c5a3d2da3e5589641628389a07f6c8a44 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:05:06 +0530 Subject: [PATCH 34/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/src/utils/train_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 568e4d04b29..1b7b60ded39 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -225,7 +225,7 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo backbone_model = backbone_model.to(device) gnn_model = gnn_model.to(device) - ##################################################################################### + ############## Initialize Data Loaders ################# trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, From 2404d4f06b267541675eacee08985db93a9d7920 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:05:22 +0530 Subject: [PATCH 35/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/src/utils/train_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 1b7b60ded39..6418c5d85d1 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -243,7 +243,7 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - ######################################################################################### + ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone #and identical model weights for the other layers. 
From 79e5502a28aff6de38f8d1798a8c75acab8b3c0d Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:05:39 +0530 Subject: [PATCH 36/47] Update misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py Co-authored-by: Rakshith Sathish --- .../chest_xray_screening_federated_gcn/src/utils/train_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 6418c5d85d1..8d8f0d97ba3 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -269,7 +269,7 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo sit3_gnn_wt=checkpoint['sit3_gnn_model'] sit4_gnn_wt=checkpoint['sit4_gnn_model'] - ########################################################################################## + ################ Begin Actual Training ############ max_val=0 for epoch in range(0, max_epochs): From e838f36ebc5cb0784f7f425dd5c078893beba675 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 15:26:03 +0530 Subject: [PATCH 37/47] Update test_export.py --- .../test/test_export.py | 46 +------------------ 1 file changed, 1 insertion(+), 45 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py index 22e94417686..153aabccf60 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -1,48 +1,9 @@ import unittest import os -import sys from ..src.utils.downloader import download_checkpoint from ..src.utils.exporter import Exporter from ..src.utils.get_config import get_config -def create_export_test_with_gnn(): - class ExportTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.config = get_config(action='export',gnn=True) - if not os.path.exists(cls.config['checkpoint']): - download_checkpoint(gnn=True) - cls.model_path = cls.config['checkpoint'] - - def test_export_onnx(self): - self.exporter = Exporter(self.config, gnn=True) - self.exporter.export_model_onnx() - self.assertTrue(os.path.join(os.path.split(self.model_path)[ - 0], self.config.get('model_name_onnx'))) - - def test_export_ir(self): - self.exporter = Exporter(self.config, gnn=True) - model_dir = os.path.split(self.config['checkpoint'])[0] - if not os.path.exists(os.path.join(model_dir, self.config.get('model_name_onnx'))): - self.exporter.export_model_onnx() - self.exporter.export_model_ir() - name_xml = self.config['model_name'] + '.xml' - name_bin = self.config['model_name'] + '.bin' - xml_status = os.path.exists(os.path.join(model_dir, name_xml)) - bin_status = os.path.exists(os.path.join(model_dir, name_bin)) - self.assertTrue(xml_status) - self.assertTrue(bin_status) - - def test_config(self): - self.config = get_config(action='export', gnn=True) - self.model_path = self.config['checkpoint'] - self.input_shape = self.config['input_shape'] - self.output_dir = os.path.split(self.model_path)[0] - self.assertTrue(self.output_dir) - self.assertTrue(self.model_path) - self.assertListEqual(self.input_shape, [1, 1, 320, 320]) - 
return ExportTest - def create_export_test_without_gnn(): class ExportTest(unittest.TestCase): @classmethod @@ -81,13 +42,8 @@ def test_config(self): self.assertListEqual(self.input_shape, [1, 1, 320, 320]) return ExportTest - - class TestInferenceEff(create_export_test_without_gnn()): 'Test case without gnn' -class TestInference(create_export_test_with_gnn()): - 'Test case with gnn' - if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() From 023a75ad424fe651ea632fd32086e10aa135736c Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 18:00:33 +0530 Subject: [PATCH 38/47] splitting train_utils --- .../src/utils/train_utils.py | 456 +----------------- .../src/utils/train_utils_cnn.py | 283 +++++++++++ .../src/utils/train_utils_gnn.py | 169 +++++++ 3 files changed, 453 insertions(+), 455 deletions(-) create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 8d8f0d97ba3..bf60071710e 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -9,9 +9,7 @@ import copy from torch_geometric.data import Data as Data_GNN from torch_geometric.data import DataLoader as DataLoader_GNN -from .inference_utils import inference from .transformations import train_transform, test_transform -from .misc import aggregate_local_weights, compute_lcl_wt, save_model_weights # Train 1 batch update def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, @@ -62,7 +60,6 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim optim2.step() optim3.step() return cnv_lyr, backbone_model, fc_layers, loss, optim1, optim2, optim3 - #if training is with gnn if trn_typ=='full': optim1.step() @@ -72,7 +69,6 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim optim4.step() return cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4 - #### Train main def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, train_loader, trn_typ, n_batches, criterion, device, @@ -81,9 +77,7 @@ def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, cnv_lyr.train() backbone_model.train() fc_layers.train() - - ########## Optimizers and Schedulers ############# - + # optimizer optim1 = torch.optim.Adam(cnv_lyr.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) optim2 = torch.optim.Adam(backbone_model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) @@ -118,50 +112,6 @@ def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, return cnv_lyr, backbone_model, fc_layers, gnn_model -####For training with gnn -def lcl_train_gnn(lr, trn_loader, val_loader, criterion, cnv_lyr, - backbone_model,fc_layers, gnn_model, edge_index, edge_attr, device): - - n_batches=1500 - ####### Freeze and train the part which is specific to each site only - print('Freeze global CNN, fine-tune GNN ...') - cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, - 
fc_layers, gnn_model, trn_loader,'gnn', n_batches, criterion, device, - edge_index, edge_attr) - - ###### Compute the Validation accuracy ####### - print('Computing Validation Performance ...') - prev_val=inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, criterion, device, - edge_index, edge_attr) - - ######## Train the entire network in an end-to-end manner ### - print('Train end-to-end for Local Site ...') - cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, - gnn_model, trn_loader,'full', 2*n_batches, criterion, device, - edge_index, edge_attr) - - cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) - backbone_wt=copy.deepcopy(backbone_model.state_dict()) - fc_wt=copy.deepcopy(fc_layers.state_dict()) - gnn_wt=copy.deepcopy(gnn_model.state_dict()) - - return prev_val, cnv_wt,backbone_wt, fc_wt, gnn_wt - -####For training without gnn -def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc_layers, device): - n_batches = 4000 - ###### Compute the Validation accuracy ####### - prev_val=inference(cnv_lyr1, backbone_model, fc_layers, None, val_loader, criterion, device) - - ######## Train the entire network in an end-to-end manner ### - train_end_to_end(lr, cnv_lyr1, backbone_model, fc_layers, None, trn_loader, None, n_batches, criterion, device) - - - cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - backbone_wt=copy.deepcopy(backbone_model.state_dict()) - fc_wt=copy.deepcopy(fc_layers.state_dict()) - return prev_val, cnv_lyr1_wt,backbone_wt, fc_wt - def initialize_training(site, img_pth, split_npz, train_transform, test_transform, b_sz, device): data_trn=construct_dataset(img_pth, split_npz, site, train_transform, tn_vl_idx=0) @@ -173,11 +123,8 @@ def initialize_training(site, img_pth, split_npz, train_transform, test_transfor criterion=Custom_Loss(site, device) edge_index, edge_attr= compute_adjacency_matrix('confusion_matrix', site, split_npz) - return trn_loader, val_loader, criterion, edge_index, edge_attr - - def initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model): cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) @@ -189,7 +136,6 @@ def initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model): def instantiate_architecture(ftr_dim, model_name, gnn=False): # If gnn=True, then instantiate the GNN architecture - if model_name=='densenet': inp_dim=1024 backbone_model=models.densenet121(pretrained=True) @@ -204,7 +150,6 @@ def instantiate_architecture(ftr_dim, model_name, gnn=False): backbone_model=xception.xception(pretrained=True) backbone_model.fc=nn.Identity() - cnv_lyr=First_Conv() fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim) if gnn: @@ -213,405 +158,6 @@ def instantiate_architecture(ftr_dim, model_name, gnn=False): return cnv_lyr, backbone_model, fc_layers - -#Main function for training -def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transform, - max_epochs, backbone, device, restart_checkpoint='', savepoint=''): - - ###### Instantiate the CNN-GNN Architecture ############## - cnv_lyr, backbone_model, fc_layers, gnn_model=instantiate_architecture(ftr_dim=512, model_name=backbone, gnn=True) - cnv_lyr = cnv_lyr.to(device) - fc_layers = fc_layers.to(device) - backbone_model = backbone_model.to(device) - gnn_model = gnn_model.to(device) - - - ############## Initialize Data Loaders ################# - - trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, - train_transform, test_transform, 
b_sz, device=device) - - trn_loader1, val_loader1, criterion1, edge_index1, edge_attr1=initialize_training(1, img_pth, split_npz, - train_transform, test_transform, b_sz, device=device) - - trn_loader2, val_loader2, criterion2, edge_index2, edge_attr2=initialize_training(2, img_pth, split_npz, - train_transform, test_transform, b_sz, device=device) - - trn_loader3, val_loader3, criterion3, edge_index3, edge_attr3=initialize_training(3, img_pth, split_npz, - train_transform, test_transform, b_sz, device=device) - - trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, - train_transform, test_transform, b_sz, device=device) - - - ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone - #and identical model weights for the other layers. - - - glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, - fc_layers, gnn_model) - sit0_gnn_wt=copy.deepcopy(gnn_wt) - sit1_gnn_wt=copy.deepcopy(gnn_wt) - sit2_gnn_wt=copy.deepcopy(gnn_wt) - sit3_gnn_wt=copy.deepcopy(gnn_wt) - sit4_gnn_wt=copy.deepcopy(gnn_wt) - - del gnn_wt - # Load previous checkpoint if resuming the training else comment out - if restart_checkpoint!='': - checkpoint=torch.load(restart_checkpoint) - glbl_cnv_wt=checkpoint['cnv_lyr_state_dict'] - glbl_backbone_wt=checkpoint['backbone_model_state_dict'] - glbl_fc_wt=checkpoint['fc_layers_state_dict'] - sit0_gnn_wt=checkpoint['sit0_gnn_model'] - sit1_gnn_wt=checkpoint['sit1_gnn_model'] - sit2_gnn_wt=checkpoint['sit2_gnn_model'] - sit3_gnn_wt=checkpoint['sit3_gnn_model'] - sit4_gnn_wt=checkpoint['sit4_gnn_model'] - - - ################ Begin Actual Training ############ - max_val=0 - for epoch in range(0, max_epochs): - print('############ Epoch: '+str(epoch)+' #################') - - ###### Load the global model weights ######## - cnv_lyr.load_state_dict(glbl_cnv_wt) - backbone_model.load_state_dict(glbl_backbone_wt) - fc_layers.load_state_dict(glbl_fc_wt) - gnn_model.load_state_dict(sit0_gnn_wt) - - print('\n \n SITE 0 \n') - prv_val0, sit0_cnv_wt,sit0_backbone_wt, sit0_fc_wt, sit0_gnn_wt=lcl_train_gnn(lr, trn_loader0, val_loader0, - criterion0, cnv_lyr, backbone_model,fc_layers, gnn_model, - edge_index0, edge_attr0, device) - - cnv_lyr.load_state_dict(glbl_cnv_wt) - backbone_model.load_state_dict(glbl_backbone_wt) - fc_layers.load_state_dict(glbl_fc_wt) - gnn_model.load_state_dict(sit1_gnn_wt) - - print('\n \n SITE 1 \n') - prv_val1, sit1_cnv_wt,sit1_backbone_wt, sit1_fc_wt, sit1_gnn_wt=lcl_train_gnn(lr, trn_loader1, val_loader1, - criterion1, cnv_lyr, backbone_model,fc_layers, gnn_model, - edge_index1, edge_attr1, device) - - cnv_lyr.load_state_dict(glbl_cnv_wt) - backbone_model.load_state_dict(glbl_backbone_wt) - fc_layers.load_state_dict(glbl_fc_wt) - gnn_model.load_state_dict(sit2_gnn_wt) - - print('\n \n SITE 2 \n') - prv_val2, sit2_cnv_wt,sit2_backbone_wt, sit2_fc_wt, sit2_gnn_wt=lcl_train_gnn(lr, trn_loader2, val_loader2, - criterion2, cnv_lyr, backbone_model,fc_layers, gnn_model, - edge_index2, edge_attr2, device) - - cnv_lyr.load_state_dict(glbl_cnv_wt) - backbone_model.load_state_dict(glbl_backbone_wt) - fc_layers.load_state_dict(glbl_fc_wt) - gnn_model.load_state_dict(sit3_gnn_wt) - - print('\n \n SITE 3 \n') - prv_val3, sit3_cnv_wt,sit3_backbone_wt, sit3_fc_wt, sit3_gnn_wt=lcl_train_gnn(lr, trn_loader3, val_loader3, - criterion3, cnv_lyr, backbone_model,fc_layers, gnn_model, - edge_index3, edge_attr3, device) - - 
cnv_lyr.load_state_dict(glbl_cnv_wt) - backbone_model.load_state_dict(glbl_backbone_wt) - fc_layers.load_state_dict(glbl_fc_wt) - gnn_model.load_state_dict(sit4_gnn_wt) - - print('\n \n SITE 4 \n') - prv_val4, sit4_cnv_wt,sit4_backbone_wt, sit4_fc_wt, sit4_gnn_wt=lcl_train_gnn(lr, trn_loader4, val_loader4, - criterion4, cnv_lyr, backbone_model,fc_layers, gnn_model, - edge_index4, edge_attr4, device) - - - avg_val=(prv_val0+prv_val1+prv_val2+prv_val3+prv_val4)/5 - print('Avg Val AUC: '+str(avg_val)) - - if avg_val>max_val: - max_val=avg_val - mx_nm=savepoint+'best_weight_'+str(max_val)+'_'+str(epoch)+'.pt' - save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, - glbl_fc_wt, sit0_gnn_wt, sit1_gnn_wt, - sit2_gnn_wt, sit3_gnn_wt, sit4_gnn_wt) - print('Validation Performance Improved !') - - - ############### Compute the global model weights ############# - - glbl_cnv_wt=aggregate_local_weights(sit0_cnv_wt, sit1_cnv_wt, sit2_cnv_wt, - sit3_cnv_wt, sit4_cnv_wt, device) - - glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, - sit3_backbone_wt, sit4_backbone_wt, device) - - glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) - - -def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_transform, - test_transform, max_epochs, backbone, device, checkpoint='', savepath=''): - - cnv_lyr1, backbone_model, fc_layers = instantiate_architecture(ftr_dim=512, model_name=backbone) - cnv_lyr1 = cnv_lyr1.to(device) - backbone_model = backbone_model.to(device) - fc_layers = fc_layers.to(device) - if checkpoint!='': - checkpoint=torch.load(checkpoint) - ######The wights saved for model without gnn have cnv_lyr1_state_dict instead of cnv_lyr_state_dict.......but for trial weights for with gnn are used here - cnv_wt=checkpoint['cnv_lyr_state_dict'] - backbone_wt=checkpoint['backbone_model_state_dict'] - fc_wt=checkpoint['fc_layers_state_dict'] - cnv_lyr1.load_state_dict(cnv_wt) - backbone_model.load_state_dict(backbone_wt) - fc_layers.load_state_dict(fc_wt) - - ### Dataloaders and model weights for each site - # Site-0 - data_trn0=construct_dataset(img_pth, split_npz, site=0, transforms=train_transform, tn_vl_idx=0) - trn_loader0=DataLoader(data_trn0,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transform, tn_vl_idx=1) - val_loader0=DataLoader(data_val0, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - # Site-1 - data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transform, tn_vl_idx=0) - trn_loader1=DataLoader(data_trn1,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transform, tn_vl_idx=1) - val_loader1=DataLoader(data_val1, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - # Site-2 - data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transform, tn_vl_idx=0) - trn_loader2=DataLoader(data_trn2,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transform, tn_vl_idx=1) - val_loader2=DataLoader(data_val2, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - # Site-3 - data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transform, tn_vl_idx=0) - trn_loader3=DataLoader(data_trn3,b_sz, 
shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transform, tn_vl_idx=1) - val_loader3=DataLoader(data_val3, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - - # Site-4 - data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transform, tn_vl_idx=0) - trn_loader4=DataLoader(data_trn4,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transform, tn_vl_idx=1) - val_loader4=DataLoader(data_val4, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - - criterion = Custom_Loss(site=-999,device=device) - - - ###### Initialize model weights with pre-trained weights - ## Global - glbl_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - glbl_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - glbl_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ## Site0 - sit0_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - sit0_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - sit0_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ## Site 1 - sit1_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - sit1_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - sit1_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ## Site 2 - sit2_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - sit2_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - sit2_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ## Site 3 - sit3_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - sit3_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - sit3_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ## Site 4 - sit4_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) - sit4_backbone_wt=copy.deepcopy(backbone_model.state_dict()) - sit4_fc_wt=copy.deepcopy(fc_layers.state_dict()) - - ###### Now begin training - max_val=0 - for epoch in range(0, max_epochs): - - ############ Perform the local trainings for each site ##### - ## Site 0 - print('\n \n SITE 0 \n') - tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit0_cnv_lyr1_wt, device) - tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit0_backbone_wt, device) - tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit0_fc_wt, device) - # Load the weights - cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) - backbone_model.load_state_dict(tmp_backbone_wt) - fc_layers.load_state_dict(tmp_fc_wt) - - prev_val0, sit0_cnv_lyr1_wt,sit0_backbone_wt, sit0_fc_wt=lcl_train(lr, trn_loader0, val_loader0, criterion, - cnv_lyr1, backbone_model,fc_layers, device ) - - del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - - ## Site 1 - print('\n \n SITE 1 \n') - tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit1_cnv_lyr1_wt, device) - tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit1_backbone_wt, device) - tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit1_fc_wt, device) - # Load the weights - cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) - backbone_model.load_state_dict(tmp_backbone_wt) - fc_layers.load_state_dict(tmp_fc_wt) - - prev_val1, sit1_cnv_lyr1_wt,sit1_backbone_wt, sit1_fc_wt=lcl_train(lr, trn_loader1, val_loader1, criterion, - cnv_lyr1, backbone_model,fc_layers, device ) - - del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - - ## Site 2 - print('\n \n SITE 2 \n') - tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit2_cnv_lyr1_wt, device) - 
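# Note on compute_lcl_wt (imported from .misc; its definition is not visible in
# this diff): from its call signature (epoch, avg_schedule, global state_dict,
# site state_dict, device) it presumably returns a per-epoch blend of the
# aggregated global weights and the site's last local weights, with
# avg_schedule[epoch] controlling the mix; the all-1.0 schedule used in
# train_model would start each round from the pure global weights. This is an
# inference from usage, not the verified implementation.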
tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit2_backbone_wt, device) - tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit2_fc_wt, device) - # Load the weights - cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) - backbone_model.load_state_dict(tmp_backbone_wt) - fc_layers.load_state_dict(tmp_fc_wt) - - prev_val2, sit2_cnv_lyr1_wt,sit2_backbone_wt, sit2_fc_wt=lcl_train(lr, trn_loader2, val_loader2, criterion, - cnv_lyr1, backbone_model,fc_layers, device ) - - del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - - ## Site 3 - print('\n \n SITE 3 \n') - tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit3_cnv_lyr1_wt, device) - tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit3_backbone_wt, device) - tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit3_fc_wt, device) - # Load the weights - cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) - backbone_model.load_state_dict(tmp_backbone_wt) - fc_layers.load_state_dict(tmp_fc_wt) - - prev_val3, sit3_cnv_lyr1_wt,sit3_backbone_wt, sit3_fc_wt=lcl_train(lr, trn_loader3, val_loader3, criterion, - cnv_lyr1, backbone_model,fc_layers , device) - - del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - - ## Site 4 - print('\n \n SITE 4 \n') - tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) - tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit4_backbone_wt, device) - tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit4_fc_wt, device) - # Load the weights - cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) - backbone_model.load_state_dict(tmp_backbone_wt) - fc_layers.load_state_dict(tmp_fc_wt) - - prev_val4, sit4_cnv_lyr1_wt,sit4_backbone_wt, sit4_fc_wt=lcl_train(lr, trn_loader4, val_loader4, criterion, - cnv_lyr1, backbone_model,fc_layers , device) - - del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - - avg_val=(prev_val0+prev_val1+prev_val2+prev_val3+prev_val4)/5 - - if avg_val>max_val: - max_val=avg_val - # save model weight, local weights - torch.save({ - 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, - 'backbone_model_state_dict': glbl_backbone_wt, - 'fc_layers_state_dict': glbl_fc_wt, - }, savepath+'best_glbl_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, - 'backbone_model_state_dict': sit0_backbone_wt, - 'fc_layers_state_dict': sit0_fc_wt, - }, savepath+'best_site0_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, - 'backbone_model_state_dict': sit1_backbone_wt, - 'fc_layers_state_dict': sit1_fc_wt, - }, savepath+'best_site1_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, - 'backbone_model_state_dict': sit2_backbone_wt, - 'fc_layers_state_dict': sit2_fc_wt, - }, savepath+'best_site2_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, - 'backbone_model_state_dict': sit3_backbone_wt, - 'fc_layers_state_dict': sit3_fc_wt, - }, savepath+'best_site3_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, - 'backbone_model_state_dict': sit4_backbone_wt, - 'fc_layers_state_dict': sit4_fc_wt, - }, savepath+'best_site4_weights.pth') - - ######### aggregate to compute global weight ############### - - glbl_cnv_lyr1_wt=aggregate_local_weights(sit0_cnv_lyr1_wt, sit1_cnv_lyr1_wt, sit2_cnv_lyr1_wt, - sit3_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) - - glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, - sit3_backbone_wt, sit4_backbone_wt, device) - - 
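# aggregate_local_weights (utils/misc.py, whose pylint-cleaned version appears
# later in this series) performs a weighted FedAvg over the five site
# state_dicts: every parameter becomes a weighted mean of the corresponding
# site parameters, with per-site weights that appear proportional to the site
# training-set sizes, and 'num_batches_tracked' counters left untouched.
# A condensed sketch of that step; _weighted_state_dict_avg is a hypothetical
# name and the exact normalisation lives in misc.average_weights (relies on the
# module-level torch/copy imports):
def _weighted_state_dict_avg(state_dicts, site_weights, device):
    wts = torch.tensor(site_weights, device=device).float()
    avg = copy.deepcopy(state_dicts[0])
    for key in avg:
        if key.split('.')[-1] == 'num_batches_tracked':
            continue  # integer BatchNorm counters are not averaged
        stacked = torch.stack([sd[key].to(device).float() * w
                               for sd, w in zip(state_dicts, wts)])
        avg[key] = stacked.sum(dim=0) / wts.sum()
    return avg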
glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) - - - ###### Just before returning, save the final weights - - # save model weight, local weights - torch.save({ - 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, - 'backbone_model_state_dict': glbl_backbone_wt, - 'fc_layers_state_dict': glbl_fc_wt, - }, savepath+'final_glbl_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, - 'backbone_model_state_dict': sit0_backbone_wt, - 'fc_layers_state_dict': sit0_fc_wt, - }, savepath+'final_site0_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, - 'backbone_model_state_dict': sit1_backbone_wt, - 'fc_layers_state_dict': sit1_fc_wt, - }, savepath+'final_site1_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, - 'backbone_model_state_dict': sit2_backbone_wt, - 'fc_layers_state_dict': sit2_fc_wt, - }, savepath+'final_site2_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, - 'backbone_model_state_dict': sit3_backbone_wt, - 'fc_layers_state_dict': sit3_fc_wt, - }, savepath+'final_site3_weights.pth') - - torch.save({ - 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, - 'backbone_model_state_dict': sit4_backbone_wt, - 'fc_layers_state_dict': sit4_fc_wt, - }, savepath+'final_site4_weights.pth') - - return - def train_model(config): if torch.cuda.is_available() and config['gpu']=='True': device = torch.device('cuda') diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py new file mode 100644 index 00000000000..2b8babf119f --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py @@ -0,0 +1,283 @@ +import torch +import copy +from .inference_utils import inference +from .train_utils import train_end_to_end, instantiate_architecture +from .dataloader import construct_dataset +from torch.utils.data import DataLoader +from .loss import Custom_Loss +from .misc import compute_lcl_wt, aggregate_local_weights + +def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc_layers, device): + n_batches = 4000 + ###### Compute the Validation accuracy ####### + prev_val=inference(cnv_lyr1, backbone_model, fc_layers, None, val_loader, criterion, device) + + ######## Train the entire network in an end-to-end manner ### + train_end_to_end(lr, cnv_lyr1, backbone_model, fc_layers, None, trn_loader, None, n_batches, criterion, device) + + + cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + backbone_wt=copy.deepcopy(backbone_model.state_dict()) + fc_wt=copy.deepcopy(fc_layers.state_dict()) + return prev_val, cnv_lyr1_wt,backbone_wt, fc_wt + +def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_transform, + test_transform, max_epochs, backbone, device, checkpoint='', savepath=''): + + cnv_lyr1, backbone_model, fc_layers = instantiate_architecture(ftr_dim=512, model_name=backbone) + cnv_lyr1 = cnv_lyr1.to(device) + backbone_model = backbone_model.to(device) + fc_layers = fc_layers.to(device) + if checkpoint!='': + checkpoint=torch.load(checkpoint) + ######The wights saved for model without gnn have cnv_lyr1_state_dict instead of cnv_lyr_state_dict.......but for trial weights for with gnn are used here + cnv_wt=checkpoint['cnv_lyr_state_dict'] + backbone_wt=checkpoint['backbone_model_state_dict'] + fc_wt=checkpoint['fc_layers_state_dict'] + cnv_lyr1.load_state_dict(cnv_wt) + 
backbone_model.load_state_dict(backbone_wt) + fc_layers.load_state_dict(fc_wt) + + ### Dataloaders and model weights for each site + # Site-0 + data_trn0=construct_dataset(img_pth, split_npz, site=0, transforms=train_transform, tn_vl_idx=0) + trn_loader0=DataLoader(data_trn0,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transform, tn_vl_idx=1) + val_loader0=DataLoader(data_val0, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-1 + data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transform, tn_vl_idx=0) + trn_loader1=DataLoader(data_trn1,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transform, tn_vl_idx=1) + val_loader1=DataLoader(data_val1, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-2 + data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transform, tn_vl_idx=0) + trn_loader2=DataLoader(data_trn2,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transform, tn_vl_idx=1) + val_loader2=DataLoader(data_val2, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + # Site-3 + data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transform, tn_vl_idx=0) + trn_loader3=DataLoader(data_trn3,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transform, tn_vl_idx=1) + val_loader3=DataLoader(data_val3, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + + # Site-4 + data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transform, tn_vl_idx=0) + trn_loader4=DataLoader(data_trn4,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) + data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transform, tn_vl_idx=1) + val_loader4=DataLoader(data_val4, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) + + + criterion = Custom_Loss(site=-999,device=device) + + + ###### Initialize model weights with pre-trained weights + ## Global + glbl_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + glbl_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + glbl_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site0 + sit0_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit0_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit0_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 1 + sit1_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit1_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit1_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 2 + sit2_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit2_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit2_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 3 + sit3_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit3_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit3_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ## Site 4 + sit4_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) + sit4_backbone_wt=copy.deepcopy(backbone_model.state_dict()) + sit4_fc_wt=copy.deepcopy(fc_layers.state_dict()) + + ###### Now begin training + max_val=0 + for epoch in range(0, max_epochs): + + ############ Perform the 
local trainings for each site ##### + ## Site 0 + print('\n \n SITE 0 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit0_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit0_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit0_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val0, sit0_cnv_lyr1_wt,sit0_backbone_wt, sit0_fc_wt=lcl_train(lr, trn_loader0, val_loader0, criterion, + cnv_lyr1, backbone_model,fc_layers, device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 1 + print('\n \n SITE 1 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit1_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit1_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit1_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val1, sit1_cnv_lyr1_wt,sit1_backbone_wt, sit1_fc_wt=lcl_train(lr, trn_loader1, val_loader1, criterion, + cnv_lyr1, backbone_model,fc_layers, device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 2 + print('\n \n SITE 2 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit2_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit2_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit2_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val2, sit2_cnv_lyr1_wt,sit2_backbone_wt, sit2_fc_wt=lcl_train(lr, trn_loader2, val_loader2, criterion, + cnv_lyr1, backbone_model,fc_layers, device ) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 3 + print('\n \n SITE 3 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit3_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit3_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit3_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val3, sit3_cnv_lyr1_wt,sit3_backbone_wt, sit3_fc_wt=lcl_train(lr, trn_loader3, val_loader3, criterion, + cnv_lyr1, backbone_model,fc_layers , device) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + ## Site 4 + print('\n \n SITE 4 \n') + tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) + tmp_backbone_wt=compute_lcl_wt(epoch, avg_schedule, glbl_backbone_wt, sit4_backbone_wt, device) + tmp_fc_wt=compute_lcl_wt(epoch, avg_schedule, glbl_fc_wt, sit4_fc_wt, device) + # Load the weights + cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) + backbone_model.load_state_dict(tmp_backbone_wt) + fc_layers.load_state_dict(tmp_fc_wt) + + prev_val4, sit4_cnv_lyr1_wt,sit4_backbone_wt, sit4_fc_wt=lcl_train(lr, trn_loader4, val_loader4, criterion, + cnv_lyr1, backbone_model,fc_layers , device) + + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt + + + avg_val=(prev_val0+prev_val1+prev_val2+prev_val3+prev_val4)/5 + + if avg_val>max_val: + 
max_val=avg_val + # save model weight, local weights + torch.save({ + 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + }, savepath+'best_glbl_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, + 'backbone_model_state_dict': sit0_backbone_wt, + 'fc_layers_state_dict': sit0_fc_wt, + }, savepath+'best_site0_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, + 'backbone_model_state_dict': sit1_backbone_wt, + 'fc_layers_state_dict': sit1_fc_wt, + }, savepath+'best_site1_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, + 'backbone_model_state_dict': sit2_backbone_wt, + 'fc_layers_state_dict': sit2_fc_wt, + }, savepath+'best_site2_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, + 'backbone_model_state_dict': sit3_backbone_wt, + 'fc_layers_state_dict': sit3_fc_wt, + }, savepath+'best_site3_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, + 'backbone_model_state_dict': sit4_backbone_wt, + 'fc_layers_state_dict': sit4_fc_wt, + }, savepath+'best_site4_weights.pth') + + ######### aggregate to compute global weight ############### + + glbl_cnv_lyr1_wt=aggregate_local_weights(sit0_cnv_lyr1_wt, sit1_cnv_lyr1_wt, sit2_cnv_lyr1_wt, + sit3_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) + + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, + sit3_backbone_wt, sit4_backbone_wt, device) + + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) + + + ###### Just before returning, save the final weights + + # save model weight, local weights + torch.save({ + 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, + 'backbone_model_state_dict': glbl_backbone_wt, + 'fc_layers_state_dict': glbl_fc_wt, + }, savepath+'final_glbl_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, + 'backbone_model_state_dict': sit0_backbone_wt, + 'fc_layers_state_dict': sit0_fc_wt, + }, savepath+'final_site0_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, + 'backbone_model_state_dict': sit1_backbone_wt, + 'fc_layers_state_dict': sit1_fc_wt, + }, savepath+'final_site1_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, + 'backbone_model_state_dict': sit2_backbone_wt, + 'fc_layers_state_dict': sit2_fc_wt, + }, savepath+'final_site2_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, + 'backbone_model_state_dict': sit3_backbone_wt, + 'fc_layers_state_dict': sit3_fc_wt, + }, savepath+'final_site3_weights.pth') + + torch.save({ + 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, + 'backbone_model_state_dict': sit4_backbone_wt, + 'fc_layers_state_dict': sit4_fc_wt, + }, savepath+'final_site4_weights.pth') + return diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py new file mode 100644 index 00000000000..cdfa898e480 --- /dev/null +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py @@ -0,0 +1,169 @@ +import torch +from .train_utils import train_end_to_end, initialize_training +from .inference_utils import inference +from .train_utils import initialize_model_weights, instantiate_architecture +from .misc import aggregate_local_weights, save_model_weights +import copy + +def lcl_train_gnn(lr, trn_loader, val_loader, 
criterion, cnv_lyr, + backbone_model,fc_layers, gnn_model, edge_index, edge_attr, device): + + n_batches=1500 + ####### Freeze and train the part which is specific to each site only + print('Freeze global CNN, fine-tune GNN ...') + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, + fc_layers, gnn_model, trn_loader,'gnn', n_batches, criterion, device, + edge_index, edge_attr) + + ###### Compute the Validation accuracy ####### + print('Computing Validation Performance ...') + prev_val=inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, criterion, device, + edge_index, edge_attr) + + ######## Train the entire network in an end-to-end manner ### + print('Train end-to-end for Local Site ...') + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, + gnn_model, trn_loader,'full', 2*n_batches, criterion, device, + edge_index, edge_attr) + + cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) + backbone_wt=copy.deepcopy(backbone_model.state_dict()) + fc_wt=copy.deepcopy(fc_layers.state_dict()) + gnn_wt=copy.deepcopy(gnn_model.state_dict()) + + return prev_val, cnv_wt,backbone_wt, fc_wt, gnn_wt + +#Main function for training +def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transform, + max_epochs, backbone, device, restart_checkpoint='', savepoint=''): + + ###### Instantiate the CNN-GNN Architecture ############## + cnv_lyr, backbone_model, fc_layers, gnn_model=instantiate_architecture(ftr_dim=512, model_name=backbone, gnn=True) + cnv_lyr = cnv_lyr.to(device) + fc_layers = fc_layers.to(device) + backbone_model = backbone_model.to(device) + gnn_model = gnn_model.to(device) + + + ############## Initialize Data Loaders ################# + + trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader1, val_loader1, criterion1, edge_index1, edge_attr1=initialize_training(1, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader2, val_loader2, criterion2, edge_index2, edge_attr2=initialize_training(2, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader3, val_loader3, criterion3, edge_index3, edge_attr3=initialize_training(3, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, + train_transform, test_transform, b_sz, device=device) + + + ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone + #and identical model weights for the other layers. 
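# initialize_model_weights (imported from .train_utils, definition not in this
# hunk) is expected to return deep copies of the four sub-networks' current
# state_dicts, roughly:
#   glbl_cnv_wt      = copy.deepcopy(cnv_lyr.state_dict())
#   glbl_backbone_wt = copy.deepcopy(backbone_model.state_dict())  # ImageNet-pretrained
#   glbl_fc_wt       = copy.deepcopy(fc_layers.state_dict())
#   gnn_wt           = copy.deepcopy(gnn_model.state_dict())
# mirroring the explicit deepcopy calls in trainer_without_GNN; this is an
# assumption from usage, not the verified implementation.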
+ + + glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, + fc_layers, gnn_model) + sit0_gnn_wt=copy.deepcopy(gnn_wt) + sit1_gnn_wt=copy.deepcopy(gnn_wt) + sit2_gnn_wt=copy.deepcopy(gnn_wt) + sit3_gnn_wt=copy.deepcopy(gnn_wt) + sit4_gnn_wt=copy.deepcopy(gnn_wt) + + del gnn_wt + # Load previous checkpoint if resuming the training else comment out + if restart_checkpoint!='': + checkpoint=torch.load(restart_checkpoint) + glbl_cnv_wt=checkpoint['cnv_lyr_state_dict'] + glbl_backbone_wt=checkpoint['backbone_model_state_dict'] + glbl_fc_wt=checkpoint['fc_layers_state_dict'] + sit0_gnn_wt=checkpoint['sit0_gnn_model'] + sit1_gnn_wt=checkpoint['sit1_gnn_model'] + sit2_gnn_wt=checkpoint['sit2_gnn_model'] + sit3_gnn_wt=checkpoint['sit3_gnn_model'] + sit4_gnn_wt=checkpoint['sit4_gnn_model'] + + + ################ Begin Actual Training ############ + max_val=0 + for epoch in range(0, max_epochs): + print('############ Epoch: '+str(epoch)+' #################') + + ###### Load the global model weights ######## + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit0_gnn_wt) + + print('\n \n SITE 0 \n') + prv_val0, sit0_cnv_wt,sit0_backbone_wt, sit0_fc_wt, sit0_gnn_wt=lcl_train_gnn(lr, trn_loader0, val_loader0, + criterion0, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index0, edge_attr0, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit1_gnn_wt) + + print('\n \n SITE 1 \n') + prv_val1, sit1_cnv_wt,sit1_backbone_wt, sit1_fc_wt, sit1_gnn_wt=lcl_train_gnn(lr, trn_loader1, val_loader1, + criterion1, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index1, edge_attr1, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit2_gnn_wt) + + print('\n \n SITE 2 \n') + prv_val2, sit2_cnv_wt,sit2_backbone_wt, sit2_fc_wt, sit2_gnn_wt=lcl_train_gnn(lr, trn_loader2, val_loader2, + criterion2, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index2, edge_attr2, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit3_gnn_wt) + + print('\n \n SITE 3 \n') + prv_val3, sit3_cnv_wt,sit3_backbone_wt, sit3_fc_wt, sit3_gnn_wt=lcl_train_gnn(lr, trn_loader3, val_loader3, + criterion3, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index3, edge_attr3, device) + + cnv_lyr.load_state_dict(glbl_cnv_wt) + backbone_model.load_state_dict(glbl_backbone_wt) + fc_layers.load_state_dict(glbl_fc_wt) + gnn_model.load_state_dict(sit4_gnn_wt) + + print('\n \n SITE 4 \n') + prv_val4, sit4_cnv_wt,sit4_backbone_wt, sit4_fc_wt, sit4_gnn_wt=lcl_train_gnn(lr, trn_loader4, val_loader4, + criterion4, cnv_lyr, backbone_model,fc_layers, gnn_model, + edge_index4, edge_attr4, device) + + + avg_val=(prv_val0+prv_val1+prv_val2+prv_val3+prv_val4)/5 + print('Avg Val AUC: '+str(avg_val)) + + if avg_val>max_val: + max_val=avg_val + mx_nm=savepoint+'best_weight_'+str(max_val)+'_'+str(epoch)+'.pt' + save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, + glbl_fc_wt, sit0_gnn_wt, sit1_gnn_wt, + sit2_gnn_wt, sit3_gnn_wt, sit4_gnn_wt) + print('Validation Performance Improved !') + + + ############### Compute the global model weights 
############# + + glbl_cnv_wt=aggregate_local_weights(sit0_cnv_wt, sit1_cnv_wt, sit2_cnv_wt, + sit3_cnv_wt, sit4_cnv_wt, device) + + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, + sit3_backbone_wt, sit4_backbone_wt, device) + + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) \ No newline at end of file From 61d38271ec60bc8a52df720e1cf3a52774a370a0 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 19:12:18 +0530 Subject: [PATCH 39/47] updating model weights links --- .../configs/download_configs.json | 8 ++++---- .../configs/fl_with_gnn.json | 12 ++++++------ .../configs/fl_without_gnn.json | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json index 8d4a021df59..8680da85e48 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json @@ -6,11 +6,11 @@ "dest_path_split": "dataset/split.zip" }, "fl_with_gnn":{ - "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model.zip", - "dest_path_model": "model_weights/with_gnn/checkpoint.zip" + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model_weights_w_gnn.zip", + "dest_path_model": "model_weights/with_gnn/model_weights_w_gnn.zip" }, "fl_without_gnn":{ - "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model.zip", - "dest_path_model": "model_weights/without_gnn/checkpoint.zip" + "url_model": "http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model_weights_wo_gnn.zip", + "dest_path_model": "model_weights/without_gnn/model_weights_wo_gnn.zip" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json index a8601d93ba7..4f8329a7017 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_with_gnn.json @@ -6,7 +6,7 @@ "epochs": 1, "gpu": "True", "lr": 1e-5, - "checkpoint": "model_weights/with_gnn/checkpoint.zip", + "checkpoint": "model_weights/with_gnn/model_weights_w_gnn.pt", "savepath": "model_weights/with_gnn/", "backbone":"resnet", "gnn":"True" @@ -17,13 +17,13 @@ "batch_size": 1, "gpu": "True", "gnn":"True", - "model_file": "model_weights/with_gnn/checkpoint.zip", - "checkpoint": "model_weights/with_gnn/model.onnx", + "model_file": "model_weights/with_gnn/model_weights_w_gnn.pt", + "checkpoint": "model_weights/with_gnn/model_weights_w_gnn.pt", "backbone":"resnet", "max_samples":10 }, "export": { - "checkpoint": "model_weights/with_gnn/checkpoint.zip", + "checkpoint": "model_weights/with_gnn/model_weights_w_gnn.pt", "backbone":"resnet", "split_path":"dataset/split.npz", "input_shape": [ @@ -32,7 +32,7 @@ 320, 320 ], - "model_name_onnx": "model.onnx", - "model_name": "model" + "model_name_onnx": "model_weights_w_gnn.onnx", + "model_name": "model_weights_w_gnn" } } \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json index 95f51a57ba1..380fda55b9d 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/fl_without_gnn.json @@ -6,7 +6,7 @@ "epochs": 1, "gpu": "True", "lr": 1e-5, - "checkpoint": "model_weights/without_gnn/checkpoint.zip", + "checkpoint": "model_weights/without_gnn/model_weights_wo_gnn.pth", "savepath": "model_weights/without_gnn/", "backbone":"resnet", "gnn":"False" @@ -17,13 +17,13 @@ "batch_size": 1, "gpu": "True", "gnn":"False", - "model_file": "model_weights/without_gnn/checkpoint.zip", - "checkpoint": "model_weights/without_gnn/model.pt", + "model_file": "model_weights/without_gnn/model_weights_wo_gnn.pth", + "checkpoint": "model_weights/without_gnn/model_weights_wo_gnn.pth", "backbone":"resnet", "max_samples":10 }, "export": { - "checkpoint": "model_weights/without_gnn/checkpoint.zip", + "checkpoint": "model_weights/without_gnn/model_weights_wo_gnn.pth", "backbone":"resnet", "split_path":"dataset/split.npz", "input_shape": [ @@ -32,7 +32,7 @@ 320, 320 ], - "model_name_onnx": "model.onnx", - "model_name": "model" + "model_name_onnx": "model_weights_wo_gnn.onnx", + "model_name": "model_weights_wo_gnn" } } \ No newline at end of file From d515b1c60f0f0a1b8c72b3fe6276ff244551b7b9 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 19:13:03 +0530 Subject: [PATCH 40/47] Update README.md --- .../chest_xray_screening_federated_gcn/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index 316ca56dda1..d365a162e4f 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -63,6 +63,8 @@ federated_chest_screening/ metric.py misc.py model.py + train_utils_cnn.py + train_utils_gnn.py train_utils.py transformation.py export.py From 82a988b0f3764b79e434eb8ee5cb33553921c819 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal <96430522+Kasliwal17@users.noreply.github.com> Date: Tue, 21 Feb 2023 19:15:00 +0530 Subject: [PATCH 41/47] splitting train_utils file --- .../src/train.py | 8 +++++-- .../src/utils/train_utils.py | 17 -------------- .../src/utils/train_utils_cnn.py | 12 ++++++++++ .../src/utils/train_utils_gnn.py | 23 +++++++++++-------- .../test/test_train.py | 7 +++--- 5 files changed, 34 insertions(+), 33 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py index ef4d939e778..a1fecf4e766 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/train.py @@ -1,4 +1,5 @@ -from utils.train_utils import train_model +from utils.train_utils_cnn import train_model +from utils.train_utils_gnn import train_model as train_model_gnn import argparse def main(args): config = { @@ -13,7 +14,10 @@ def main(args): 'checkpoint': args.checkpoint, 'savepath': args.savepath, } - train_model(config) + if args.gnn == 'True': + train_model_gnn(config) + else: + train_model(config) if __name__ == '__main__': diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py 
b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index bf60071710e..bd03407c128 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -157,20 +157,3 @@ def instantiate_architecture(ftr_dim, model_name, gnn=False): return cnv_lyr, backbone_model, fc_layers, gnn_model return cnv_lyr, backbone_model, fc_layers - -def train_model(config): - if torch.cuda.is_available() and config['gpu']=='True': - device = torch.device('cuda') - else: - device = torch.device('cpu') - if config['gnn']=="True": - trainer_with_GNN(config['lr'],config['batch_size'], config['data'], config['split_npz'], - train_transform, test_transform, config['epochs'], config['backbone'], - device, config['checkpoint'], config['savepath'] ) - else: - avg_schedule = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] - trainer_without_GNN(avg_schedule, config['lr'], config['batch_size'], config['data'], - config['split_npz'], train_transform, test_transform, config['epochs'], - config['backbone'], device, config['checkpoint'], config['savepath']) - - diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py index 2b8babf119f..d50acd910f3 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py @@ -6,6 +6,7 @@ from torch.utils.data import DataLoader from .loss import Custom_Loss from .misc import compute_lcl_wt, aggregate_local_weights +from .transformations import train_transform, test_transform def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc_layers, device): n_batches = 4000 @@ -281,3 +282,14 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans 'fc_layers_state_dict': sit4_fc_wt, }, savepath+'final_site4_weights.pth') return + +def train_model(config): + if torch.cuda.is_available() and config['gpu']=='True': + device = torch.device('cuda') + else: + device = torch.device('cpu') + avg_schedule = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + trainer_without_GNN(avg_schedule, config['lr'], config['batch_size'], config['data'], + config['split_npz'], train_transform, test_transform, config['epochs'], + config['backbone'], device, config['checkpoint'], config['savepath']) + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py index cdfa898e480..cf40214bcb6 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py @@ -3,6 +3,7 @@ from .inference_utils import inference from .train_utils import initialize_model_weights, instantiate_architecture from .misc import aggregate_local_weights, save_model_weights +from .transformations import train_transform, test_transform import copy def lcl_train_gnn(lr, trn_loader, val_loader, criterion, cnv_lyr, @@ -44,9 +45,7 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo backbone_model = backbone_model.to(device) gnn_model = gnn_model.to(device) - - 
############## Initialize Data Loaders ################# - + ############## Initialize Data Loaders ################# trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) @@ -62,11 +61,9 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone #and identical model weights for the other layers. - glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model) sit0_gnn_wt=copy.deepcopy(gnn_wt) @@ -88,12 +85,10 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo sit3_gnn_wt=checkpoint['sit3_gnn_model'] sit4_gnn_wt=checkpoint['sit4_gnn_model'] - ################ Begin Actual Training ############ max_val=0 for epoch in range(0, max_epochs): print('############ Epoch: '+str(epoch)+' #################') - ###### Load the global model weights ######## cnv_lyr.load_state_dict(glbl_cnv_wt) backbone_model.load_state_dict(glbl_backbone_wt) @@ -145,7 +140,6 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo criterion4, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index4, edge_attr4, device) - avg_val=(prv_val0+prv_val1+prv_val2+prv_val3+prv_val4)/5 print('Avg Val AUC: '+str(avg_val)) @@ -157,7 +151,6 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo sit2_gnn_wt, sit3_gnn_wt, sit4_gnn_wt) print('Validation Performance Improved !') - ############### Compute the global model weights ############# glbl_cnv_wt=aggregate_local_weights(sit0_cnv_wt, sit1_cnv_wt, sit2_cnv_wt, @@ -166,4 +159,14 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, sit3_backbone_wt, sit4_backbone_wt, device) - glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) \ No newline at end of file + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) + +def train_model(config): + if torch.cuda.is_available() and config['gpu']=='True': + device = torch.device('cuda') + else: + device = torch.device('cpu') + trainer_with_GNN(config['lr'],config['batch_size'], config['data'], config['split_npz'], + train_transform, test_transform, config['epochs'], config['backbone'], + device, config['checkpoint'], config['savepath'] ) + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py index 21304c6e09d..94598272083 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py @@ -1,10 +1,9 @@ import unittest import os -import sys -from ..src.utils.train_utils import train_model from ..src.utils.downloader import download_checkpoint, download_data from ..src.utils.get_config import get_config -from ..src.utils.train_utils import train_model +from ..src.utils.train_utils_cnn import train_model +from ..src.utils.train_utils_gnn import train_model 
as train_model_gnn def create_train_test_for_without_gnn(): class TrainerTest(unittest.TestCase): @@ -37,7 +36,7 @@ def setUpClass(cls): def test_trainer(self): if not os.path.exists(self.config["checkpoint"]): download_checkpoint(gnn=True) - train_model(self.config) + train_model_gnn(self.config) def test_config(self): self.config = get_config(action='train', gnn=True) From 73ad71a77bd45b08f8d1e546a9da86a232d9a9e7 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal Date: Tue, 21 Feb 2023 20:53:20 +0530 Subject: [PATCH 42/47] pylint changes --- .../src/__init__.py | 0 .../src/inference.py | 4 +-- .../src/utils/__init__.py | 0 .../src/utils/dataloader.py | 3 -- .../src/utils/loss.py | 2 +- .../src/utils/misc.py | 3 +- .../src/utils/model.py | 17 +++++------ .../src/utils/train_utils.py | 1 - .../src/utils/train_utils_cnn.py | 28 ++++++++----------- 9 files changed, 24 insertions(+), 34 deletions(-) create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/__init__.py create mode 100644 misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__init__.py diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/__init__.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py index 152ff44e74a..f24c0ab082d 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/inference.py @@ -36,8 +36,8 @@ def main(args): parser.add_argument('--gpu', type=str, help='Want GPU ?', required=False, default='False') parser.add_argument('--model_file', type=str, required=False, help='Path of model weights saved for running inference with pytorch') - parser.add_argument('--checkpoint', type=str, - required=False, help='Path of onnx model file to load for inference. Required if run type is onnx or ir') + parser.add_argument('--checkpoint', type=str,required=False, + help='Path of onnx model file to load for inference. 
Required if run type is onnx or ir') parser.add_argument('--gnn', type=str, help='using gnn or not?', required=False, default='False') parser.add_argument('--backbone', type=str, required=False, default='resnet', diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__init__.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py index a694c83ea06..6523bfd6bda 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py @@ -3,8 +3,6 @@ import torch import numpy as np -################ Dataloader ######################### - class construct_dataset(data.Dataset): def __init__(self, data_pth, split_npz, site, transforms, tn_vl_idx): # site [0,4] or -999 which means global @@ -50,4 +48,3 @@ def __getitem__(self, index): def __len__(self): #print(self.img_pths.shape[0]) return self.img_names.shape[0] - diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py index 5a661d2e75f..3248558cd81 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py @@ -5,7 +5,7 @@ ############# Define the Weighted Loss. The weights are different for each class ######## class Custom_Loss(nn.Module): def __init__(self, site, device=torch.device('cpu')): - super(Custom_Loss, self).__init__() + super().__init__() config = get_config(action='loss') wts_pos = np.array(config[str(site)]['wts_pos']) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py index 62dd3325d1a..8aa6dceb009 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py @@ -132,4 +132,5 @@ def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gn 'sit2_gnn_model': sit2_gnn_wt, 'sit3_gnn_model': sit3_gnn_wt, 'sit4_gnn_model': sit4_gnn_wt, - }, mx_nm) \ No newline at end of file + }, mx_nm) + \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index b5280f82195..c865ef81238 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -11,13 +11,10 @@ ''' Instead of creating an instance of the models within the constructor, We will pass the initial layer for 1 to 3 channel, the backbone model and the FC layers part separately as input arguments. 
- - This will allow us to simply load the weights for each CNN model separately, - may be useful when updating only part of the network ''' class Fully_Connected_Layer(nn.Module): def __init__(self, inp_dim, ftr_dim): - super(Fully_Connected_Layer, self).__init__() + super().__init__() ftr_lyr=nn.ModuleList() cls_lyr=nn.ModuleList() @@ -56,7 +53,7 @@ def forward(self, x): ############## Conv 1st layer ####################### class First_Conv(nn.Module): def __init__(self): - super(First_Conv, self).__init__() + super().__init__() # Convert 1 channel to 3 channel also can be made unique for each site self.convert_channels=nn.Sequential(nn.Conv2d(1,3,1,1, bias=False), @@ -70,7 +67,7 @@ def forward(self, x): # This MLP will map the edge weight to the weights used to avg. the features from the neighbors class create_mlp(nn.Module): def __init__(self, in_chnl, out): - super(create_mlp, self).__init__() + super().__init__() self.lyr=nn.Sequential( nn.Linear(in_chnl, out, bias=True), @@ -83,7 +80,7 @@ def forward(self, x): # The Resdiual Block for the GNN class Res_Graph_Conv_Lyr(nn.Module): def __init__(self, in_chnls, base_chnls, mlp_model, aggr_md): - super(Res_Graph_Conv_Lyr, self).__init__() + super().__init__() self.GNN_lyr=NNConv(in_chnls, base_chnls, mlp_model, aggr=aggr_md) self.bn=GNN_BatchNorm(base_chnls) @@ -98,7 +95,7 @@ def forward(self, x, edge_index, edge_attr): ############### The Graph Convolution Network ############################ class GNN_Network(nn.Module): def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): - super(GNN_Network, self).__init__() + super().__init__() my_gcn=nn.ModuleList() @@ -142,7 +139,7 @@ def forward(self, data): class GNN_Network_infer(nn.Module): def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): - super(GNN_Network_infer, self).__init__() + super().__init__() my_gcn=nn.ModuleList() # Base channels is actually the fraction of inp. 
@@ -179,7 +176,7 @@ def forward(self, x, edge_index, edge_attr): ########Combined model for inference and export########### class Infer_model(nn.Module): def __init__(self, backbone,split_path, gnn=True): - super(Infer_model, self).__init__() + super().__init__() self.gnn = gnn if backbone=='densenet': inp_dim=1024 diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index bd03407c128..6f56f7dd734 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -9,7 +9,6 @@ import copy from torch_geometric.data import Data as Data_GNN from torch_geometric.data import DataLoader as DataLoader_GNN -from .transformations import train_transform, test_transform # Train 1 batch update def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py index d50acd910f3..81f38f48b1d 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py @@ -12,18 +12,16 @@ def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc n_batches = 4000 ###### Compute the Validation accuracy ####### prev_val=inference(cnv_lyr1, backbone_model, fc_layers, None, val_loader, criterion, device) - ######## Train the entire network in an end-to-end manner ### train_end_to_end(lr, cnv_lyr1, backbone_model, fc_layers, None, trn_loader, None, n_batches, criterion, device) - cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) backbone_wt=copy.deepcopy(backbone_model.state_dict()) fc_wt=copy.deepcopy(fc_layers.state_dict()) return prev_val, cnv_lyr1_wt,backbone_wt, fc_wt -def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_transform, - test_transform, max_epochs, backbone, device, checkpoint='', savepath=''): +def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_transforms, + test_transforms, max_epochs, backbone, device, checkpoint='', savepath=''): cnv_lyr1, backbone_model, fc_layers = instantiate_architecture(ftr_dim=512, model_name=backbone) cnv_lyr1 = cnv_lyr1.to(device) @@ -31,7 +29,6 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans fc_layers = fc_layers.to(device) if checkpoint!='': checkpoint=torch.load(checkpoint) - ######The wights saved for model without gnn have cnv_lyr1_state_dict instead of cnv_lyr_state_dict.......but for trial weights for with gnn are used here cnv_wt=checkpoint['cnv_lyr_state_dict'] backbone_wt=checkpoint['backbone_model_state_dict'] fc_wt=checkpoint['fc_layers_state_dict'] @@ -41,34 +38,34 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans ### Dataloaders and model weights for each site # Site-0 - data_trn0=construct_dataset(img_pth, split_npz, site=0, transforms=train_transform, tn_vl_idx=0) + data_trn0=construct_dataset(img_pth, split_npz, site=0, transforms=train_transforms, tn_vl_idx=0) trn_loader0=DataLoader(data_trn0,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transform, 
tn_vl_idx=1) + data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transforms, tn_vl_idx=1) val_loader0=DataLoader(data_val0, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) # Site-1 - data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transform, tn_vl_idx=0) + data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transforms, tn_vl_idx=0) trn_loader1=DataLoader(data_trn1,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transform, tn_vl_idx=1) + data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transforms, tn_vl_idx=1) val_loader1=DataLoader(data_val1, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) # Site-2 - data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transform, tn_vl_idx=0) + data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transforms, tn_vl_idx=0) trn_loader2=DataLoader(data_trn2,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transform, tn_vl_idx=1) + data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transforms, tn_vl_idx=1) val_loader2=DataLoader(data_val2, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) # Site-3 - data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transform, tn_vl_idx=0) + data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transforms, tn_vl_idx=0) trn_loader3=DataLoader(data_trn3,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transform, tn_vl_idx=1) + data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transforms, tn_vl_idx=1) val_loader3=DataLoader(data_val3, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) # Site-4 - data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transform, tn_vl_idx=0) + data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transforms, tn_vl_idx=0) trn_loader4=DataLoader(data_trn4,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transform, tn_vl_idx=1) + data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transforms, tn_vl_idx=1) val_loader4=DataLoader(data_val4, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) @@ -281,7 +278,6 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans 'backbone_model_state_dict': sit4_backbone_wt, 'fc_layers_state_dict': sit4_fc_wt, }, savepath+'final_site4_weights.pth') - return def train_model(config): if torch.cuda.is_available() and config['gpu']=='True': From 2a7481d255efa574faf8580455a8504c3d484838 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal Date: Tue, 21 Feb 2023 22:28:22 +0530 Subject: [PATCH 43/47] pylint changes --- .../src/utils/dataloader.py | 4 +- .../src/utils/inference_utils.py | 53 ++++---- .../src/utils/loss.py | 9 +- .../src/utils/metric.py | 15 ++- .../src/utils/misc.py | 39 +++--- .../src/utils/model.py | 87 +++++++------- .../src/utils/train_utils.py | 60 +++++----- .../src/utils/train_utils_cnn.py | 113 +++++++++--------- .../src/utils/train_utils_gnn.py | 85 +++++++------ .../src/utils/transformations.py 
| 1 - 10 files changed, 229 insertions(+), 237 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py index 6523bfd6bda..665e9a33ceb 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/dataloader.py @@ -27,7 +27,7 @@ def __init__(self, data_pth, split_npz, site, transforms, tn_vl_idx): self.gt=gt self.transforms=transforms self.data_pth=data_pth - + def __getitem__(self, index): """Take the index of item and returns the image and its labels""" img_nm=self.img_names[index] @@ -43,7 +43,7 @@ def __getitem__(self, index): else: pass sample={'img': image, 'gt': gt, 'img_nm': img_nm} - return sample + return sample def __len__(self): #print(self.img_pths.shape[0]) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py index b139a5a6adf..aaef993155f 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/inference_utils.py @@ -10,7 +10,7 @@ from torch.utils.data import DataLoader from .transformations import test_transform from openvino.inference_engine import IECore -import torchvision.transforms as transforms +from torchvision import transforms from .misc import aggregate_local_weights import os import onnxruntime @@ -22,25 +22,25 @@ def to_numpy(tensor): ################# To be used for inference during training #################### def inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, criterion, device,edge_index=None, edge_attr=None): - + tot_loss=0 # tot_auc=0 gt_lst=[] pred_lst=[] - + cnv_lyr.eval() - backbone_model.eval() + backbone_model.eval() fc_layers.eval() if gnn_model is not None: gnn_model.eval() - + with torch.no_grad(): for count, sample in enumerate(val_loader): img=sample['img'] gt=sample['gt'] img=img.to(device) gt=gt.to(device) - + img_3chnl=cnv_lyr(img) gap_ftr=backbone_model(img_3chnl) ftr_lst, prd=fc_layers(gap_ftr) @@ -49,43 +49,43 @@ def inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, data_lst=[] for k in range(0, ftr_lst.shape[0]): data_lst.append(Data_GNN(x=ftr_lst[k,:,:], edge_index=edge_index, - edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) - + edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) + loader = DataLoader_GNN(data_lst, batch_size=ftr_lst.shape[0]) loader=next(iter(loader)).to(device) gt=loader.y - prd_final=gnn_model(loader) + prd_final=gnn_model(loader) else: prd_final=prd ########Forward Pass ############################################# loss=criterion(prd_final, gt) # Apply the sigmoid prd_final=F.sigmoid(prd_final) - + gt_lst.append(gt.cpu().numpy()) pred_lst.append(prd_final.cpu().numpy()) tot_loss=tot_loss+loss.cpu().numpy() del loss, gt, prd_final, prd - + gt_lst=np.concatenate(gt_lst, axis=1) pred_lst=np.concatenate(pred_lst, axis=1) - + gt_lst=np.transpose(gt_lst) pred_lst=np.transpose(pred_lst) - + # Now compute and display the average count=count+1 # since it began from 0 avg_loss=tot_loss/count - + # sens_lst, spec_lst, acc_lst, auc_lst=compute_performance(pred_lst, gt_lst) _, _, _, auc_lst=compute_performance(pred_lst, gt_lst) avg_auc=np.mean(auc_lst) - + print ("\n Val_Loss: {:.4f}, Avg. 
AUC: {:.4f}".format(avg_loss, avg_auc)) - metric=avg_auc # this will be monitored for Early Stopping - + metric=avg_auc + cnv_lyr.train() - backbone_model.train() + backbone_model.train() fc_layers.train() if gnn_model is not None: gnn_model.train() @@ -125,8 +125,8 @@ def load_inference_model(config, run_type): model_bin = split_text + ".bin" model_temp = ie.read_network(model_xml, model_bin) model = ie.load_network(network=model_temp, device_name='CPU') - return model + def validate_model(model, config, run_type): # GPU transfer - Only pytorch models needs to be transfered. max_samples = config['max_samples'] @@ -170,21 +170,21 @@ def validate_model(model, config, run_type): prd_final = prd_final.squeeze(0) gt=gt.cpu() loss=criterion(prd_final, gt) - + # Apply the sigmoid prd_final=F.sigmoid(prd_final) - + gt_lst.append(gt.cpu().numpy()) pred_lst.append(prd_final.cpu().numpy()) - - + + tot_loss=tot_loss+loss.cpu().numpy() - + del loss, gt, prd_final if count==max_samples: break - - + + gt_lst=np.concatenate(gt_lst, axis=1) pred_lst=np.concatenate(pred_lst, axis=1) gt_lst=np.transpose(gt_lst) @@ -200,4 +200,3 @@ def validate_model(model, config, run_type): def inference_model(config, run_type): model = load_inference_model(config, run_type) validate_model(model, config, run_type) - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py index 3248558cd81..22925c9780b 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/loss.py @@ -13,20 +13,19 @@ def __init__(self, site, device=torch.device('cpu')): wts_pos = torch.from_numpy(wts_pos) wts_pos = wts_pos.type(torch.Tensor) wts_pos=wts_pos.to(device) # size 1 by cls - + wts_neg = torch.from_numpy(wts_neg) wts_neg = wts_neg.type(torch.Tensor) wts_neg=wts_neg.to(device) # size 1 by cls - + self.wts_pos=wts_pos self.wts_neg=wts_neg self.bce=nn.BCEWithLogitsLoss(reduction='none') - + def forward(self, ypred, ytrue): msk = ((1-ytrue)*self.wts_neg) + (ytrue*self.wts_pos) #1 if ytrue is 0 loss=self.bce(ypred,ytrue) # bsz, cls loss=loss*msk loss=loss.view(-1) # flatten all batches and class losses - loss=torch.mean(loss) + loss=torch.mean(loss) return loss - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py index 0a23bb9d55d..04507630860 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/metric.py @@ -1,7 +1,7 @@ from sklearn.metrics import roc_auc_score import numpy as np def compute_performance(pred, gt): - #This function computes the performance metrics : Accuracy, AUC, Sensitivity and Specificity + #This function computes the performance metrics : Accuracy, AUC, Sensitivity and Specificity acc_lst=[] auc_lst=[] sens_lst=[] @@ -12,30 +12,29 @@ def compute_performance(pred, gt): idx1=np.where(pred_cls>=0.5) pred_cls[idx0]=0 pred_cls[idx1]=1 - - for cls in range(0, pred_scr.shape[1]): + + for cls in range(0, pred_scr.shape[1]): tmp_prd_scr=pred_scr[:,cls] tmp_prd_cls=pred_cls[:, cls] tmp_gt=gt[:, cls] - + TP=np.where((tmp_gt==1) & (tmp_prd_cls==1))[0].shape[0] TN=np.where((tmp_gt==0) & (tmp_prd_cls==0))[0].shape[0] FP=np.where((tmp_gt==0) & 
(tmp_prd_cls==1))[0].shape[0] FN=np.where((tmp_gt==1) & (tmp_prd_cls==0))[0].shape[0] - + acc=(TP+TN)/(TP+TN+FP+FN) sens=TP/(TP+FN) spec=TN/(TN+FP) auc=roc_auc_score(tmp_gt, tmp_prd_scr) - + sens_lst.append(sens) spec_lst.append(spec) acc_lst.append(acc) auc_lst.append(auc) - + sens_lst=np.array(sens_lst) spec_lst=np.array(spec_lst) acc_lst=np.array(acc_lst) auc_lst=np.array(auc_lst) return sens_lst, spec_lst, acc_lst, auc_lst - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py index 8aa6dceb009..f5f7c98f9e7 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/misc.py @@ -11,33 +11,33 @@ def compute_edge_attr(A): continue edge.append(np.array([j,k])) edge_attr.append(A[j,k]) - + edge=np.array(edge) edge_attr=np.array(edge_attr) - + edge=torch.from_numpy(np.transpose(edge)) edge=edge.long() - + edge_attr=torch.from_numpy(edge_attr) edge_attr=torch.unsqueeze(edge_attr, dim=1) edge_attr=edge_attr.float() - + return edge, edge_attr def compute_adjacency_matrix(adj_type, site, split_npz='/storage/aneesh/split.npz'): # load the npz file - a=np.load(split_npz, allow_pickle=True) + a=np.load(split_npz, allow_pickle=True) gt=a['gt'] clstr_assgn=a['clstr_assgn'] trn_val_tst=a['trn_val_tst'] del a - + if site==-999: idx=np.where(trn_val_tst==0)[0] else: idx=np.where((clstr_assgn==site) & (trn_val_tst==0))[0] gt=gt[idx] - + kappa=np.zeros((14,14)) TP=np.zeros((14,14)) TN=np.zeros((14,14)) @@ -45,27 +45,27 @@ def compute_adjacency_matrix(adj_type, site, split_npz='/storage/aneesh/split.np FN=np.zeros((14,14)) kappa=np.zeros((14,14)) agree=np.zeros((14,14)) - + for j in range(0,14): gt_j=gt[j] for k in range(0, 14): gt_k=gt[k] - + ## Kappa and agree are symmetric ie., A(i,j)=A(j,i) kappa[j,k]=cohen_kappa_score(gt_j, gt_k) agree[j,k]=(np.where(gt_j==gt_k)[0].shape[0])/gt.shape[0] - + # How many times are both j and k =1---> This will be symmetric TP[j,k]=(np.where((gt_j==1) & (gt_k==1))[0].shape[0])/gt.shape[0] # How many times are both j and k=0 ---> This will be symmetric TN[j,k]=(np.where((gt_j==0) & (gt_k==0))[0].shape[0])/gt.shape[0] - + ####### FP and FN will get reversed for A(i,j) and A(j,i) # How many time k is 1 but j is 0 FP[j,k]=(np.where((gt_j==0) & (gt_k==1))[0].shape[0])/gt.shape[0] # How many time k is 0 but j is 1 FN[j,k]=(np.where((gt_j==1) & (gt_k==0))[0].shape[0])/gt.shape[0] - + if adj_type=='kappa': A=kappa elif adj_type=='fraction_agreement': @@ -73,7 +73,7 @@ def compute_adjacency_matrix(adj_type, site, split_npz='/storage/aneesh/split.np elif adj_type=='confusion_matrix': A=np.concatenate((np.expand_dims(TP, axis=2), np.expand_dims(TN, axis=2), np.expand_dims(FP, axis=2), np.expand_dims(FN, axis=2)), axis=2) - + if A.ndim==2: tmp_edge, edge_attr=compute_edge_attr(A) else: @@ -95,7 +95,7 @@ def average_weights(w, cmb_wt, device): wts = torch.tensor(cmb_wt).to(device) wts=wts.float() w_avg = copy.deepcopy(w[0]) - + for key in w_avg.keys(): # for each layer layer = key.split('.')[-1] if layer == 'num_batches_tracked': @@ -109,19 +109,19 @@ def average_weights(w, cmb_wt, device): return w_avg def aggregate_local_weights(wt0, wt1, wt2, wt3, wt4, device): - - wt=average_weights([wt0, wt1, wt2, wt3, wt4], + + wt=average_weights([wt0, wt1, wt2, wt3, wt4], [1.0, 2030.0/1997, 2093.0/1997, 1978.0/1997, 2122.0/1997],device) return wt def 
compute_lcl_wt(epoch, cmb_wts, glbl_wt, prev_lcl_wt, device): - + cmb_wt=cmb_wts[epoch] lcl_wt=1-cmb_wt wt=average_weights([prev_lcl_wt, glbl_wt], [lcl_wt, cmb_wt],device) return wt - -def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gnn_wt=None, sit1_gnn_wt=None, + +def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gnn_wt=None, sit1_gnn_wt=None, sit2_gnn_wt=None,sit3_gnn_wt=None, sit4_gnn_wt=None): torch.save({ 'cnv_lyr_state_dict': glbl_cnv_wt, @@ -133,4 +133,3 @@ def save_model_weights(mx_nm, glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, sit0_gn 'sit3_gnn_model': sit3_gnn_wt, 'sit4_gnn_model': sit4_gnn_wt, }, mx_nm) - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py index c865ef81238..adb61d7b80e 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/model.py @@ -8,31 +8,31 @@ import torch.nn.functional as F from torchvision import models # Define Architecture -''' Instead of creating an instance of the models within the constructor, - We will pass the initial layer for 1 to 3 channel, the backbone model - and the FC layers part separately as input arguments. +''' Instead of creating an instance of the models within the constructor, + We will pass the initial layer for 1 to 3 channel, the backbone model + and the FC layers part separately as input arguments. ''' class Fully_Connected_Layer(nn.Module): def __init__(self, inp_dim, ftr_dim): super().__init__() - + ftr_lyr=nn.ModuleList() cls_lyr=nn.ModuleList() - - for cls in range(0,14): + + for _ in range(0,14): ftr_lyr.append( nn.Sequential( - nn.Linear(inp_dim, ftr_dim, bias=False), + nn.Linear(inp_dim, ftr_dim, bias=False), nn.BatchNorm1d(ftr_dim), nn.ReLU(), - nn.Linear(ftr_dim, ftr_dim, bias=False), - nn.BatchNorm1d(ftr_dim), + nn.Linear(ftr_dim, ftr_dim, bias=False), + nn.BatchNorm1d(ftr_dim), nn.ReLU() ) ) cls_lyr.append( nn.Sequential( - nn.Linear(ftr_dim, 1, bias=False), + nn.Linear(ftr_dim, 1, bias=False), nn.BatchNorm1d(1) ) ) @@ -49,14 +49,14 @@ def forward(self, x): prd_lst.append(prd) prd=torch.cat(prd_lst, axis=1) return ftr_lst, prd - + ############## Conv 1st layer ####################### class First_Conv(nn.Module): def __init__(self): super().__init__() - + # Convert 1 channel to 3 channel also can be made unique for each site - self.convert_channels=nn.Sequential(nn.Conv2d(1,3,1,1, bias=False), + self.convert_channels=nn.Sequential(nn.Conv2d(1,3,1,1, bias=False), nn.BatchNorm2d(3), nn.ReLU() ) def forward(self, x): @@ -67,8 +67,8 @@ def forward(self, x): # This MLP will map the edge weight to the weights used to avg. 
the features from the neighbors class create_mlp(nn.Module): def __init__(self, in_chnl, out): - super().__init__() - + super().__init__() + self.lyr=nn.Sequential( nn.Linear(in_chnl, out, bias=True), nn.Tanh() @@ -80,52 +80,52 @@ def forward(self, x): # The Resdiual Block for the GNN class Res_Graph_Conv_Lyr(nn.Module): def __init__(self, in_chnls, base_chnls, mlp_model, aggr_md): - super().__init__() - + super().__init__() + self.GNN_lyr=NNConv(in_chnls, base_chnls, mlp_model, aggr=aggr_md) self.bn=GNN_BatchNorm(base_chnls) - + def forward(self, x, edge_index, edge_attr): h=self.GNN_lyr(x, edge_index, edge_attr) h=self.bn(h) h=F.relu(h) return x+h - + ############### The Graph Convolution Network ############################ class GNN_Network(nn.Module): def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): super().__init__() - + my_gcn=nn.ModuleList() - + # Base channels is actually the fraction of inp. in_chnls=int(in_chnls) base_chnls=int(base_chnls*in_chnls) - + # A GCN to map input channels to base channels dimensions my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, base_chnls, create_mlp(ftr_dim, in_chnls*base_chnls), aggr_md)) - + in_chnls=base_chnls - for k in range(0, depth): + for _ in range(0, depth): out_chnls=int(in_chnls*grwth_rate) # Get a GCN in_chnls=max(in_chnls,1) out_chnls=max(out_chnls,1) my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, out_chnls, create_mlp(ftr_dim,in_chnls*out_chnls), aggr_md)) in_chnls=out_chnls - #### Add the final classification layer that will convert output to 1D + #### Add the final classification layer that will convert output to 1D my_gcn.append(NNConv(in_chnls, 1, create_mlp(ftr_dim, 1*in_chnls), aggr='mean')) - + self.my_gcn=my_gcn self.dpth=depth - + def forward(self, data): - + x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr cnt=0 x=self.my_gcn[cnt](x, edge_index, edge_attr) - for k in range(0, self.dpth): + for _ in range(0, self.dpth): cnt=cnt+1 #print(cnt) x=self.my_gcn[cnt](x, edge_index, edge_attr) @@ -140,37 +140,37 @@ def forward(self, data): class GNN_Network_infer(nn.Module): def __init__(self, in_chnls, base_chnls, grwth_rate, depth, aggr_md, ftr_dim): super().__init__() - + my_gcn=nn.ModuleList() # Base channels is actually the fraction of inp. 
in_chnls=int(in_chnls) base_chnls=int(base_chnls*in_chnls) - + # A GCN to map input channels to base channels dimensions my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, base_chnls, create_mlp(ftr_dim, in_chnls*base_chnls), aggr_md)) - + in_chnls=base_chnls - for k in range(0, depth): + for _ in range(0, depth): out_chnls=int(in_chnls*grwth_rate) # Get a GCN in_chnls=max(in_chnls,1) out_chnls=max(out_chnls,1) my_gcn.append(Res_Graph_Conv_Lyr(in_chnls, out_chnls, create_mlp(ftr_dim,in_chnls*out_chnls), aggr_md)) in_chnls=out_chnls - #### Add the final classification layer that will convert output to 1D + #### Add the final classification layer that will convert output to 1D my_gcn.append(NNConv(in_chnls, 1, create_mlp(ftr_dim, 1*in_chnls), aggr='mean')) - + self.my_gcn=my_gcn self.dpth=depth - + def forward(self, x, edge_index, edge_attr): cnt=0 x=self.my_gcn[cnt](x, edge_index, edge_attr) - for k in range(0, self.dpth): + for _ in range(0, self.dpth): cnt=cnt+1 x=self.my_gcn[cnt](x, edge_index, edge_attr) cnt=cnt+1 - out=self.my_gcn[cnt](x, edge_index, edge_attr) + out=self.my_gcn[cnt](x, edge_index, edge_attr) return out ########Combined model for inference and export########### @@ -186,12 +186,12 @@ def __init__(self, backbone,split_path, gnn=True): inp_dim=512 backbone_model=models.resnet18(weights='IMAGENET1K_V1') backbone_model.fc=nn.Identity() - + elif backbone=='xception': inp_dim=2048 backbone_model=xception.xception(pretrained=True) backbone_model.fc=nn.Identity() - + cnv_lyr=First_Conv() fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim=512) self.edge_index, self.edge_attr= compute_adjacency_matrix('confusion_matrix', -999, split_path) @@ -211,11 +211,10 @@ def forward(self, x): ftr_list, prd=self.fc_layers(gap_ftr) ftr_list=torch.cat(ftr_list, dim=1) ftr_list = ftr_list[0] - if self.gnn==True: + if self.gnn is True: if x.is_cuda: self.edge_attr=self.edge_attr.cuda() self.edge_index=self.edge_index.cuda() - prd=self.gnn_model(ftr_list, self.edge_index, self.edge_attr) - prd=prd.transpose(1,0) + prd=self.gnn_model(ftr_list, self.edge_index, self.edge_attr) + prd=prd.transpose(1,0) return prd - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py index 6f56f7dd734..0757ed29d90 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils.py @@ -11,36 +11,36 @@ from torch_geometric.data import DataLoader as DataLoader_GNN # Train 1 batch update -def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, +def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, trn_typ, criterion, device, edge_index=None, edge_attr=None): ##Keep gnn_model and optim4 as None if training is to be done without GNN img=sample['img'] gt=sample['gt'] - + img=img.to(device) gt=gt.to(device) - + ########Forward Pass ############## img_3chnl=cnv_lyr(img) gap_ftr=backbone_model(img_3chnl) ftr_lst, prd=fc_layers(gap_ftr) if gnn_model is not None: ftr_lst=torch.cat(ftr_lst, dim=1) - + data_lst=[] for k in range(0, ftr_lst.shape[0]): data_lst.append(Data_GNN(x=ftr_lst[k,:,:], edge_index=edge_index, - edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) + edge_attr=edge_attr, y=torch.unsqueeze(gt[k,:], dim=1))) loader = DataLoader_GNN(data_lst, 
batch_size=ftr_lst.shape[0]) loader=next(iter(loader)).to(device) gt=loader.y - + prd_final=gnn_model(loader) else: prd_final=prd - + loss=criterion(prd_final, gt) - + ####### Backward Pass ########## ### Remove previous gradients optim1.zero_grad() @@ -51,9 +51,9 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim ### Compute Gradients loss.backward() - + ### Optimizer Gradients - #if training is without gnn + #if training is without gnn if gnn_model is None: optim1.step() optim2.step() @@ -61,19 +61,19 @@ def train_one_batch(sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim return cnv_lyr, backbone_model, fc_layers, loss, optim1, optim2, optim3 #if training is with gnn if trn_typ=='full': - optim1.step() + optim1.step() optim2.step() - + optim3.step() optim4.step() return cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4 #### Train main def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, - train_loader, trn_typ, n_batches, criterion, device, + train_loader, trn_typ, n_batches, criterion, device, edge_index=None, edge_attr=None): - - cnv_lyr.train() + + cnv_lyr.train() backbone_model.train() fc_layers.train() @@ -84,25 +84,25 @@ def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, if gnn_model is not None: gnn_model.train() optim4 = torch.optim.Adam(gnn_model.parameters(), lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5) - + cnt=0 trn_run_loss=0 for i, sample in enumerate(train_loader): cnt=cnt+1 if gnn_model is not None: cnv_lyr, backbone_model, fc_layers, gnn_model, loss, optim1, optim2, optim3, optim4=train_one_batch( - sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, + sample, cnv_lyr, backbone_model, fc_layers, gnn_model, optim1, optim2, optim3, optim4, trn_typ, criterion, device, edge_index, edge_attr) else: #Set gnn_model and optim4 as None if training is to be done without GNN cnv_lyr, backbone_model, fc_layers, loss, optim1, optim2, optim3=train_one_batch( - sample, cnv_lyr, backbone_model, fc_layers, None, optim1, optim2, optim3, None, - trn_typ, criterion, device) + sample, cnv_lyr, backbone_model, fc_layers, None, optim1, optim2, optim3, None, + trn_typ, criterion, device) trn_run_loss=trn_run_loss+loss - + if (i+1) % 20== 0: # displays after every 20 batch updates print ("cnt {}, Train Loss: {:.4f}".format(cnt,(trn_run_loss/(cnt))), end ="\r") - + ############# Monitor Validation Acc and Early Stopping ############ if cnt>=n_batches: break @@ -112,25 +112,25 @@ def train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, return cnv_lyr, backbone_model, fc_layers, gnn_model def initialize_training(site, img_pth, split_npz, train_transform, test_transform, b_sz, device): - + data_trn=construct_dataset(img_pth, split_npz, site, train_transform, tn_vl_idx=0) trn_loader=DataLoader(data_trn,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) - + data_val=construct_dataset(img_pth, split_npz, site, test_transform, tn_vl_idx=1) val_loader=DataLoader(data_val, 1, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - + criterion=Custom_Loss(site, device) edge_index, edge_attr= compute_adjacency_matrix('confusion_matrix', site, split_npz) - + return trn_loader, val_loader, criterion, edge_index, edge_attr def initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model): - + cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) backbone_wt=copy.deepcopy(backbone_model.state_dict()) 
fc_wt=copy.deepcopy(fc_layers.state_dict()) gnn_wt=copy.deepcopy(gnn_model.state_dict()) - + return cnv_wt, backbone_wt, fc_wt, gnn_wt def instantiate_architecture(ftr_dim, model_name, gnn=False): @@ -143,16 +143,16 @@ def instantiate_architecture(ftr_dim, model_name, gnn=False): inp_dim=512 backbone_model=models.resnet18(pretrained=True) backbone_model.fc=nn.Identity() - + elif model_name=='xception': inp_dim=2048 backbone_model=xception.xception(pretrained=True) backbone_model.fc=nn.Identity() - + cnv_lyr=First_Conv() fc_layers=Fully_Connected_Layer(inp_dim, ftr_dim) if gnn: gnn_model=GNN_Network(in_chnls=512, base_chnls=1, grwth_rate=1, depth=1, aggr_md='mean', ftr_dim=4) return cnv_lyr, backbone_model, fc_layers, gnn_model - + return cnv_lyr, backbone_model, fc_layers diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py index 81f38f48b1d..853728e9865 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_cnn.py @@ -14,7 +14,7 @@ def lcl_train(lr, trn_loader, val_loader, criterion, cnv_lyr1, backbone_model,fc prev_val=inference(cnv_lyr1, backbone_model, fc_layers, None, val_loader, criterion, device) ######## Train the entire network in an end-to-end manner ### train_end_to_end(lr, cnv_lyr1, backbone_model, fc_layers, None, trn_loader, None, n_batches, criterion, device) - + cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) backbone_wt=copy.deepcopy(backbone_model.state_dict()) fc_wt=copy.deepcopy(fc_layers.state_dict()) @@ -42,71 +42,71 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans trn_loader0=DataLoader(data_trn0,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) data_val0=construct_dataset(img_pth, split_npz, site=0, transforms=test_transforms, tn_vl_idx=1) val_loader0=DataLoader(data_val0, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - + # Site-1 data_trn1=construct_dataset(img_pth, split_npz, site=1, transforms=train_transforms, tn_vl_idx=0) trn_loader1=DataLoader(data_trn1,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) data_val1=construct_dataset(img_pth, split_npz, site=1, transforms=test_transforms, tn_vl_idx=1) val_loader1=DataLoader(data_val1, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - + # Site-2 data_trn2=construct_dataset(img_pth, split_npz, site=2, transforms=train_transforms, tn_vl_idx=0) trn_loader2=DataLoader(data_trn2,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) data_val2=construct_dataset(img_pth, split_npz, site=2, transforms=test_transforms, tn_vl_idx=1) val_loader2=DataLoader(data_val2, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - + # Site-3 data_trn3=construct_dataset(img_pth, split_npz, site=3, transforms=train_transforms, tn_vl_idx=0) trn_loader3=DataLoader(data_trn3,b_sz, shuffle=True, num_workers=1, pin_memory=False, drop_last=True) data_val3=construct_dataset(img_pth, split_npz, site=3, transforms=test_transforms, tn_vl_idx=1) val_loader3=DataLoader(data_val3, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - + + # Site-4 data_trn4=construct_dataset(img_pth, split_npz, site=4, transforms=train_transforms, tn_vl_idx=0) trn_loader4=DataLoader(data_trn4,b_sz, shuffle=True, num_workers=1, pin_memory=False, 
drop_last=True) data_val4=construct_dataset(img_pth, split_npz, site=4, transforms=test_transforms, tn_vl_idx=1) val_loader4=DataLoader(data_val4, b_sz, shuffle=False, num_workers=1, pin_memory=False, drop_last=True) - - + + criterion = Custom_Loss(site=-999,device=device) - - + + ###### Initialize model weights with pre-trained weights ## Global glbl_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) glbl_backbone_wt=copy.deepcopy(backbone_model.state_dict()) glbl_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ## Site0 sit0_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) sit0_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit0_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ## Site 1 sit1_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) sit1_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit1_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ## Site 2 sit2_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) sit2_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit2_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ## Site 3 sit3_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) sit3_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit3_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ## Site 4 sit4_cnv_lyr1_wt=copy.deepcopy(cnv_lyr1.state_dict()) sit4_backbone_wt=copy.deepcopy(backbone_model.state_dict()) sit4_fc_wt=copy.deepcopy(fc_layers.state_dict()) - + ###### Now begin training max_val=0 for epoch in range(0, max_epochs): - + ############ Perform the local trainings for each site ##### ## Site 0 print('\n \n SITE 0 \n') @@ -117,13 +117,13 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) backbone_model.load_state_dict(tmp_backbone_wt) fc_layers.load_state_dict(tmp_fc_wt) - - prev_val0, sit0_cnv_lyr1_wt,sit0_backbone_wt, sit0_fc_wt=lcl_train(lr, trn_loader0, val_loader0, criterion, + + prev_val0, sit0_cnv_lyr1_wt,sit0_backbone_wt, sit0_fc_wt=lcl_train(lr, trn_loader0, val_loader0, criterion, cnv_lyr1, backbone_model,fc_layers, device ) - + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - + + ## Site 1 print('\n \n SITE 1 \n') tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit1_cnv_lyr1_wt, device) @@ -133,13 +133,13 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) backbone_model.load_state_dict(tmp_backbone_wt) fc_layers.load_state_dict(tmp_fc_wt) - + prev_val1, sit1_cnv_lyr1_wt,sit1_backbone_wt, sit1_fc_wt=lcl_train(lr, trn_loader1, val_loader1, criterion, cnv_lyr1, backbone_model,fc_layers, device ) - + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - + + ## Site 2 print('\n \n SITE 2 \n') tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit2_cnv_lyr1_wt, device) @@ -149,13 +149,13 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) backbone_model.load_state_dict(tmp_backbone_wt) fc_layers.load_state_dict(tmp_fc_wt) - + prev_val2, sit2_cnv_lyr1_wt,sit2_backbone_wt, sit2_fc_wt=lcl_train(lr, trn_loader2, val_loader2, criterion, cnv_lyr1, backbone_model,fc_layers, device ) - + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - + + ## Site 3 print('\n \n SITE 3 \n') tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit3_cnv_lyr1_wt, device) @@ -165,13 +165,13 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans 
cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) backbone_model.load_state_dict(tmp_backbone_wt) fc_layers.load_state_dict(tmp_fc_wt) - + prev_val3, sit3_cnv_lyr1_wt,sit3_backbone_wt, sit3_fc_wt=lcl_train(lr, trn_loader3, val_loader3, criterion, cnv_lyr1, backbone_model,fc_layers , device) - + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - + + ## Site 4 print('\n \n SITE 4 \n') tmp_cnv_lyr1_wt=compute_lcl_wt(epoch, avg_schedule, glbl_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) @@ -181,15 +181,15 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans cnv_lyr1.load_state_dict(tmp_cnv_lyr1_wt) backbone_model.load_state_dict(tmp_backbone_wt) fc_layers.load_state_dict(tmp_fc_wt) - + prev_val4, sit4_cnv_lyr1_wt,sit4_backbone_wt, sit4_fc_wt=lcl_train(lr, trn_loader4, val_loader4, criterion, cnv_lyr1, backbone_model,fc_layers , device) - + del tmp_cnv_lyr1_wt, tmp_backbone_wt, tmp_fc_wt - - + + avg_val=(prev_val0+prev_val1+prev_val2+prev_val3+prev_val4)/5 - + if avg_val>max_val: max_val=avg_val # save model weight, local weights @@ -198,81 +198,81 @@ def trainer_without_GNN( avg_schedule, lr, b_sz, img_pth, split_npz, train_trans 'backbone_model_state_dict': glbl_backbone_wt, 'fc_layers_state_dict': glbl_fc_wt, }, savepath+'best_glbl_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, 'backbone_model_state_dict': sit0_backbone_wt, 'fc_layers_state_dict': sit0_fc_wt, }, savepath+'best_site0_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, 'backbone_model_state_dict': sit1_backbone_wt, 'fc_layers_state_dict': sit1_fc_wt, }, savepath+'best_site1_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, 'backbone_model_state_dict': sit2_backbone_wt, 'fc_layers_state_dict': sit2_fc_wt, }, savepath+'best_site2_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, 'backbone_model_state_dict': sit3_backbone_wt, 'fc_layers_state_dict': sit3_fc_wt, }, savepath+'best_site3_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, 'backbone_model_state_dict': sit4_backbone_wt, 'fc_layers_state_dict': sit4_fc_wt, }, savepath+'best_site4_weights.pth') - + ######### aggregate to compute global weight ############### - + glbl_cnv_lyr1_wt=aggregate_local_weights(sit0_cnv_lyr1_wt, sit1_cnv_lyr1_wt, sit2_cnv_lyr1_wt, sit3_cnv_lyr1_wt, sit4_cnv_lyr1_wt, device) - + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, sit3_backbone_wt, sit4_backbone_wt, device) - + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) - - + + ###### Just before returning, save the final weights - + # save model weight, local weights torch.save({ 'cnv_lyr1_state_dict': glbl_cnv_lyr1_wt, 'backbone_model_state_dict': glbl_backbone_wt, 'fc_layers_state_dict': glbl_fc_wt, }, savepath+'final_glbl_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit0_cnv_lyr1_wt, 'backbone_model_state_dict': sit0_backbone_wt, 'fc_layers_state_dict': sit0_fc_wt, }, savepath+'final_site0_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit1_cnv_lyr1_wt, 'backbone_model_state_dict': sit1_backbone_wt, 'fc_layers_state_dict': sit1_fc_wt, }, savepath+'final_site1_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit2_cnv_lyr1_wt, 'backbone_model_state_dict': sit2_backbone_wt, 'fc_layers_state_dict': sit2_fc_wt, }, savepath+'final_site2_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit3_cnv_lyr1_wt, 'backbone_model_state_dict': sit3_backbone_wt, 
'fc_layers_state_dict': sit3_fc_wt, }, savepath+'final_site3_weights.pth') - + torch.save({ 'cnv_lyr1_state_dict': sit4_cnv_lyr1_wt, 'backbone_model_state_dict': sit4_backbone_wt, @@ -288,4 +288,3 @@ def train_model(config): trainer_without_GNN(avg_schedule, config['lr'], config['batch_size'], config['data'], config['split_npz'], train_transform, test_transform, config['epochs'], config['backbone'], device, config['checkpoint'], config['savepath']) - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py index cf40214bcb6..b1994ab52e5 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/train_utils_gnn.py @@ -8,36 +8,36 @@ def lcl_train_gnn(lr, trn_loader, val_loader, criterion, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index, edge_attr, device): - + n_batches=1500 ####### Freeze and train the part which is specific to each site only print('Freeze global CNN, fine-tune GNN ...') - cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, trn_loader,'gnn', n_batches, criterion, device, edge_index, edge_attr) - + ###### Compute the Validation accuracy ####### print('Computing Validation Performance ...') prev_val=inference(cnv_lyr, backbone_model, fc_layers, gnn_model, val_loader, criterion, device, edge_index, edge_attr) - + ######## Train the entire network in an end-to-end manner ### print('Train end-to-end for Local Site ...') - cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, + cnv_lyr, backbone_model, fc_layers, gnn_model=train_end_to_end(lr, cnv_lyr, backbone_model, fc_layers, gnn_model, trn_loader,'full', 2*n_batches, criterion, device, edge_index, edge_attr) - + cnv_wt=copy.deepcopy(cnv_lyr.state_dict()) backbone_wt=copy.deepcopy(backbone_model.state_dict()) fc_wt=copy.deepcopy(fc_layers.state_dict()) gnn_wt=copy.deepcopy(gnn_model.state_dict()) - + return prev_val, cnv_wt,backbone_wt, fc_wt, gnn_wt -#Main function for training +#Main function for training def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transform, max_epochs, backbone, device, restart_checkpoint='', savepoint=''): - + ###### Instantiate the CNN-GNN Architecture ############## cnv_lyr, backbone_model, fc_layers, gnn_model=instantiate_architecture(ftr_dim=512, model_name=backbone, gnn=True) cnv_lyr = cnv_lyr.to(device) @@ -45,33 +45,33 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo backbone_model = backbone_model.to(device) gnn_model = gnn_model.to(device) - ############## Initialize Data Loaders ################# - trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, + ############## Initialize Data Loaders ################# + trn_loader0, val_loader0, criterion0, edge_index0, edge_attr0=initialize_training(0, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - - trn_loader1, val_loader1, criterion1, edge_index1, edge_attr1=initialize_training(1, img_pth, split_npz, + + trn_loader1, val_loader1, criterion1, edge_index1, edge_attr1=initialize_training(1, img_pth, split_npz, train_transform, 
test_transform, b_sz, device=device) - - trn_loader2, val_loader2, criterion2, edge_index2, edge_attr2=initialize_training(2, img_pth, split_npz, + + trn_loader2, val_loader2, criterion2, edge_index2, edge_attr2=initialize_training(2, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - - trn_loader3, val_loader3, criterion3, edge_index3, edge_attr3=initialize_training(3, img_pth, split_npz, + + trn_loader3, val_loader3, criterion3, edge_index3, edge_attr3=initialize_training(3, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - - trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, + + trn_loader4, val_loader4, criterion4, edge_index4, edge_attr4=initialize_training(4, img_pth, split_npz, train_transform, test_transform, b_sz, device=device) - - ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone + + ### Initialize local and global model weights with the Imagenet pre-trained weights for backbone #and identical model weights for the other layers. - glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, + glbl_cnv_wt, glbl_backbone_wt, glbl_fc_wt, gnn_wt=initialize_model_weights(cnv_lyr, backbone_model, fc_layers, gnn_model) sit0_gnn_wt=copy.deepcopy(gnn_wt) sit1_gnn_wt=copy.deepcopy(gnn_wt) sit2_gnn_wt=copy.deepcopy(gnn_wt) sit3_gnn_wt=copy.deepcopy(gnn_wt) sit4_gnn_wt=copy.deepcopy(gnn_wt) - + del gnn_wt # Load previous checkpoint if resuming the training else comment out if restart_checkpoint!='': @@ -84,7 +84,7 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo sit2_gnn_wt=checkpoint['sit2_gnn_model'] sit3_gnn_wt=checkpoint['sit3_gnn_model'] sit4_gnn_wt=checkpoint['sit4_gnn_model'] - + ################ Begin Actual Training ############ max_val=0 for epoch in range(0, max_epochs): @@ -94,55 +94,55 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo backbone_model.load_state_dict(glbl_backbone_wt) fc_layers.load_state_dict(glbl_fc_wt) gnn_model.load_state_dict(sit0_gnn_wt) - + print('\n \n SITE 0 \n') prv_val0, sit0_cnv_wt,sit0_backbone_wt, sit0_fc_wt, sit0_gnn_wt=lcl_train_gnn(lr, trn_loader0, val_loader0, criterion0, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index0, edge_attr0, device) - + cnv_lyr.load_state_dict(glbl_cnv_wt) backbone_model.load_state_dict(glbl_backbone_wt) fc_layers.load_state_dict(glbl_fc_wt) gnn_model.load_state_dict(sit1_gnn_wt) - + print('\n \n SITE 1 \n') - prv_val1, sit1_cnv_wt,sit1_backbone_wt, sit1_fc_wt, sit1_gnn_wt=lcl_train_gnn(lr, trn_loader1, val_loader1, + prv_val1, sit1_cnv_wt,sit1_backbone_wt, sit1_fc_wt, sit1_gnn_wt=lcl_train_gnn(lr, trn_loader1, val_loader1, criterion1, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index1, edge_attr1, device) - + cnv_lyr.load_state_dict(glbl_cnv_wt) backbone_model.load_state_dict(glbl_backbone_wt) fc_layers.load_state_dict(glbl_fc_wt) gnn_model.load_state_dict(sit2_gnn_wt) - + print('\n \n SITE 2 \n') - prv_val2, sit2_cnv_wt,sit2_backbone_wt, sit2_fc_wt, sit2_gnn_wt=lcl_train_gnn(lr, trn_loader2, val_loader2, + prv_val2, sit2_cnv_wt,sit2_backbone_wt, sit2_fc_wt, sit2_gnn_wt=lcl_train_gnn(lr, trn_loader2, val_loader2, criterion2, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index2, edge_attr2, device) - + cnv_lyr.load_state_dict(glbl_cnv_wt) backbone_model.load_state_dict(glbl_backbone_wt) fc_layers.load_state_dict(glbl_fc_wt) 
gnn_model.load_state_dict(sit3_gnn_wt) - + print('\n \n SITE 3 \n') - prv_val3, sit3_cnv_wt,sit3_backbone_wt, sit3_fc_wt, sit3_gnn_wt=lcl_train_gnn(lr, trn_loader3, val_loader3, + prv_val3, sit3_cnv_wt,sit3_backbone_wt, sit3_fc_wt, sit3_gnn_wt=lcl_train_gnn(lr, trn_loader3, val_loader3, criterion3, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index3, edge_attr3, device) - + cnv_lyr.load_state_dict(glbl_cnv_wt) backbone_model.load_state_dict(glbl_backbone_wt) fc_layers.load_state_dict(glbl_fc_wt) gnn_model.load_state_dict(sit4_gnn_wt) - + print('\n \n SITE 4 \n') - prv_val4, sit4_cnv_wt,sit4_backbone_wt, sit4_fc_wt, sit4_gnn_wt=lcl_train_gnn(lr, trn_loader4, val_loader4, + prv_val4, sit4_cnv_wt,sit4_backbone_wt, sit4_fc_wt, sit4_gnn_wt=lcl_train_gnn(lr, trn_loader4, val_loader4, criterion4, cnv_lyr, backbone_model,fc_layers, gnn_model, edge_index4, edge_attr4, device) avg_val=(prv_val0+prv_val1+prv_val2+prv_val3+prv_val4)/5 print('Avg Val AUC: '+str(avg_val)) - + if avg_val>max_val: max_val=avg_val mx_nm=savepoint+'best_weight_'+str(max_val)+'_'+str(epoch)+'.pt' @@ -150,15 +150,15 @@ def trainer_with_GNN(lr, b_sz, img_pth, split_npz, train_transform, test_transfo glbl_fc_wt, sit0_gnn_wt, sit1_gnn_wt, sit2_gnn_wt, sit3_gnn_wt, sit4_gnn_wt) print('Validation Performance Improved !') - + ############### Compute the global model weights ############# - + glbl_cnv_wt=aggregate_local_weights(sit0_cnv_wt, sit1_cnv_wt, sit2_cnv_wt, sit3_cnv_wt, sit4_cnv_wt, device) - + glbl_backbone_wt=aggregate_local_weights(sit0_backbone_wt, sit1_backbone_wt, sit2_backbone_wt, sit3_backbone_wt, sit4_backbone_wt, device) - + glbl_fc_wt=aggregate_local_weights(sit0_fc_wt, sit1_fc_wt, sit2_fc_wt, sit3_fc_wt, sit4_fc_wt, device) def train_model(config): @@ -169,4 +169,3 @@ def train_model(config): trainer_with_GNN(config['lr'],config['batch_size'], config['data'], config['split_npz'], train_transform, test_transform, config['epochs'], config['backbone'], device, config['checkpoint'], config['savepath'] ) - \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py index 42dac7091b8..529a50e0fde 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/src/utils/transformations.py @@ -13,4 +13,3 @@ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]) ]) - \ No newline at end of file From 181fad0053bde645f415971471c347d7263d5e2f Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal Date: Tue, 21 Feb 2023 22:34:08 +0530 Subject: [PATCH 44/47] Update test_inference.py --- .../test/test_inference.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py index b07547d8a7a..cdb8ab9447a 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -21,16 +21,6 @@ def test_pytorch_inference(self): config = get_config(action='inference', gnn=True) inference_model(config,'pytorch') - - def test_onnx_inference(self): - - config = get_config(action='inference', gnn=True) - inference_model(config,'onnx') - - def test_ir_inference(self): - - config = get_config(action='inference', gnn=True) - 
inference_model(config,'ir') return InferenceTest From fdb9563f521a6b9eb8f3595828e6816281406d74 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal Date: Wed, 22 Feb 2023 16:39:49 +0530 Subject: [PATCH 45/47] Updating dataset links --- .../chest_xray_screening_federated_gcn/README.md | 2 +- .../configs/download_configs.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index d365a162e4f..a56c8cc08a4 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -99,7 +99,7 @@ federated_chest_screening/ ### Run Tests -Necessary unit tests have been provided in the tests directory. The sample/toy dataset to be used in the tests can also be downloaded from [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase1/phase1.zip) and [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi34/phase2/phase2.zip). +Necessary unit tests have been provided in the tests directory. The sample/toy dataset to be used in the tests can also be downloaded from [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/data_samples.zip) ## Acknowledgement diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json index 8680da85e48..2cf196c7637 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/configs/download_configs.json @@ -1,8 +1,8 @@ { "data":{ "dest_path_data": "dataset/data/data.zip", - "url_data":"", - "url_split": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/model.zip", + "url_data":"http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/data_samples.zip", + "url_split": "http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/split.npz", "dest_path_split": "dataset/split.zip" }, "fl_with_gnn":{ From 3ead981ec918e852849ef702df29fe2979057706 Mon Sep 17 00:00:00 2001 From: Aditya Kasliwal Date: Wed, 22 Feb 2023 16:43:11 +0530 Subject: [PATCH 46/47] Update README.md --- .../chest_xray_screening_federated_gcn/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index a56c8cc08a4..e6583024733 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -36,7 +36,7 @@ The overall performance of the proposed CNN-GNN is ## Model -Download `.pth` checkpoint for CNN-GNN model trained on CheXpert dataset with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/high_low/weights.zip). +Download `.pth` checkpoint for CNN-GNN model trained on CheXpert dataset with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model_weights_w_gnn.zip). Note: The ONNX and IR representation models accepts inputs of fixed size mentioned in configuration file. This needs to be updated based on the input size. 
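The note above ties the exported ONNX/IR models to the input size recorded in the configuration file. A minimal sketch of what that implies is given below; it is not part of the patch series, and the module name `net`, the 224x224 size and the output path are assumptions for illustration only (the 1-channel input does match the `nn.Conv2d(1, 3, 1, 1)` first layer shown in `model.py`).

```python
# Minimal sketch (not from the patches): exporting with a fixed input shape.
# `net`, the 224x224 size and the output path are illustrative assumptions.
import torch

def export_onnx_fixed_size(net, onnx_path, height=224, width=224):
    net.eval()
    # The ONNX graph is traced with this exact shape, so ONNX/IR inference
    # inputs must match it unless the export is redone for a new size.
    dummy = torch.randn(1, 1, height, width)
    torch.onnx.export(net, dummy, onnx_path, opset_version=11,
                      input_names=['input'], output_names=['output'])
```

Re-running such an export with a different `height`/`width` is what the note means by updating the model based on the input size.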
From 591d2ce88513a4a0b326209ddc725a790fccc378 Mon Sep 17 00:00:00 2001 From: Rakshith2597 Date: Thu, 23 Feb 2023 15:33:21 +0530 Subject: [PATCH 47/47] updated readme --- .../README.md | 34 ++++++++++++++++--- .../requirements.txt | 4 +-- .../test/test_export.py | 6 ++-- .../test/test_inference.py | 6 ++-- .../test/test_train.py | 8 ++--- 5 files changed, 40 insertions(+), 18 deletions(-) diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md index e6583024733..cfa252c132f 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/README.md @@ -36,9 +36,11 @@ The overall performance of the proposed CNN-GNN is ## Model -Download `.pth` checkpoint for CNN-GNN model trained on CheXpert dataset with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model_weights_w_gnn.zip). +Download `.pth` checkpoint for CNN-GNN model with the following [link](http://kliv.iitkgp.ac.in/projects/miriad/model_weights/bmi8/model_weights_w_gnn.zip). -Note: The ONNX and IR representation models accepts inputs of fixed size mentioned in configuration file. This needs to be updated based on the input size. +> Note: The ONNX and IR representation models accepts inputs of fixed size mentioned in configuration file. This needs to be updated based on the input size. + +> Note: PyTorch to ONNX conversion is not currently supported for GNN models. Hence, export to ONNX and IR is currently disabled for the GNN. ## Setup @@ -49,7 +51,6 @@ Note: The ONNX and IR representation models accepts inputs of fixed size mention ## Code and Directory Organisation - ``` federated_chest_screening/ src/ @@ -96,8 +97,25 @@ federated_chest_screening/ 5. **tests** directory contains unit tests. 6. **config** directory contains model configurations for the network. +## Create Environment + +``` +sh init_venv.sh +source venv/bin/activate + +``` +In addition to the packages mentioned in requirements.txt, users are requested to install additional packages using + +``` +pip install pyg-lib torch-scatter torch-sparse -f https://data.pyg.org/whl/torch-${TORCH}+${CUDA}.html +pip install torch-geometric + +``` + +where `${TORCH}` and `${CUDA}` should be replaced by the specific PyTorch and CUDA versions, respectively -### Run Tests + +## Run Tests Necessary unit tests have been provided in the tests directory. The sample/toy dataset to be used in the tests can also be downloaded from [here](http://kliv.iitkgp.ac.in/projects/miriad/sample_data/bmi8/data_samples.zip) @@ -110,7 +128,7 @@ India Grand Challenge 2016 for Project MIRIAD. **Principal Investigators** -Dr Debdoot Sheet
+Dr Debdoot Sheet, Dr Nirmalya Ghosh (Co-PI)
Department of Electrical Engineering,
Indian Institute of Technology Kharagpur
email: debdoot@ee.iitkgp.ac.in @@ -135,6 +153,12 @@ Indian Institute of Technology Kharagpur
email: rakshith.sathish@kgpian.iitkgp.ac.in
Github username: Rakshith2597 + Anupam Borthakur,
+Center of Excellence in Artificial Intelligence,
+Indian Institute of Technology Kharagpur
+email: ANUPAMBORTHAKUR@kgpian.iitkgp.ac.in
+Github username: anupam-kliv + ## References diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt index 5988cbca843..e4bb955e6dc 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/requirements.txt @@ -1,4 +1,4 @@ -torch==1.12.1 +torch==1.13.1 torchvision==0.13.1 torchmetrics pydicom @@ -10,8 +10,6 @@ openvino-dev[onnx]==2021.4.2 onnxruntime==1.8.1 numpy==1.19.2 matplotlib==3.5.2 -termcolor==1.1.0 -dahuffman wget tqdm pytest \ No newline at end of file diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py index 153aabccf60..287a7404fb3 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_export.py @@ -1,8 +1,8 @@ import unittest import os -from ..src.utils.downloader import download_checkpoint -from ..src.utils.exporter import Exporter -from ..src.utils.get_config import get_config +from src.utils.downloader import download_checkpoint +from src.utils.exporter import Exporter +from src.utils.get_config import get_config def create_export_test_without_gnn(): class ExportTest(unittest.TestCase): diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py index cdb8ab9447a..39d3300770b 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_inference.py @@ -3,10 +3,10 @@ import unittest import torchvision import sys -from ..src.utils.inference_utils import inference_model +from src.utils.inference_utils import inference_model from torch.utils.data import DataLoader -from ..src.utils.get_config import get_config -from ..src.utils.downloader import download_data +from src.utils.get_config import get_config +from src.utils.downloader import download_data def create_inference_test_with_gnn(): class InferenceTest(unittest.TestCase): diff --git a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py index 94598272083..bde391b54db 100644 --- a/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py +++ b/misc/pytorch_toolkit/chest_xray_screening_federated_gcn/test/test_train.py @@ -1,9 +1,9 @@ import unittest import os -from ..src.utils.downloader import download_checkpoint, download_data -from ..src.utils.get_config import get_config -from ..src.utils.train_utils_cnn import train_model -from ..src.utils.train_utils_gnn import train_model as train_model_gnn +from src.utils.downloader import download_checkpoint, download_data +from src.utils.get_config import get_config +from src.utils.train_utils_cnn import train_model +from src.utils.train_utils_gnn import train_model as train_model_gnn def create_train_test_for_without_gnn(): class TrainerTest(unittest.TestCase):
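Since the weighted aggregation step recurs throughout the patches above (`average_weights`, `aggregate_local_weights`, and the per-site `lcl_train`/`lcl_train_gnn` loops), a minimal sketch of that averaging follows for orientation. Names such as `fedavg` and `sd0`...`sd4` are placeholders, and the normalisation of the site weights to a sum of one is an assumption of the sketch; only the skipping of `num_batches_tracked` and the example weight ratios mirror the code in `misc.py`.

```python
# Minimal sketch of weighted state_dict averaging in the spirit of
# average_weights/aggregate_local_weights. Names are placeholders; the
# normalisation to a sum of one is an assumption of this sketch.
import copy
import torch

def fedavg(state_dicts, site_weights, device=torch.device('cpu')):
    wts = torch.tensor(site_weights, dtype=torch.float, device=device)
    wts = wts / wts.sum()
    avg = copy.deepcopy(state_dicts[0])
    for key in avg:
        if key.split('.')[-1] == 'num_batches_tracked':
            continue  # keep the integer BatchNorm counter from the first site
        avg[key] = sum(w * sd[key].to(device) for w, sd in zip(wts, state_dicts))
    return avg

# Example: five site checkpoints weighted in the same ratio as the constants
# hard-coded in aggregate_local_weights (1997 : 2030 : 2093 : 1978 : 2122).
# glbl_wt = fedavg([sd0, sd1, sd2, sd3, sd4], [1997, 2030, 2093, 1978, 2122])
```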