From cad8210c556b6bbaaaeafe6114d886aba7c06f74 Mon Sep 17 00:00:00 2001
From: Alexander Krull
Date: Wed, 7 Aug 2019 16:54:49 +0200
Subject: [PATCH 1/5] outputfiles in predict script are named according to input files

---
 n2v/internals/N2V_DataGenerator.py |  2 +-
 scripts/predictN2V.py              | 17 +++++++++--------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/n2v/internals/N2V_DataGenerator.py b/n2v/internals/N2V_DataGenerator.py
index 7ba9d94..305093d 100644
--- a/n2v/internals/N2V_DataGenerator.py
+++ b/n2v/internals/N2V_DataGenerator.py
@@ -101,7 +101,7 @@
         """
         files = glob(join(directory, filter))
-        files.sort()
+        files.sort()
         return self.load_imgs(files, dims=dims)
diff --git a/scripts/predictN2V.py b/scripts/predictN2V.py
index 42a55a5..cde237b 100644
--- a/scripts/predictN2V.py
+++ b/scripts/predictN2V.py
@@ -1,13 +1,14 @@
 import os
 import sys
 import argparse
+from glob import glob
 
 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 parser.add_argument("--baseDir", help="directory in which all your network will live", default='models')
 parser.add_argument("--name", help="name of your network", default='N2V3D')
 parser.add_argument("--dataPath", help="The path to your data")
 parser.add_argument("--fileName", help="name of your data file", default="*.tif")
-parser.add_argument("--output", help="The path to your data to be saved", default='predictions.tif')
+parser.add_argument("--output", help="The path to which your data is to be saved", default='.')
 parser.add_argument("--dims", help="dimensions of your data", default='YX')
 parser.add_argument("--tile", help="will cut your image [TILE] times in every dimension to make it fit GPU memory", default=1, type=int)
 
@@ -45,9 +46,10 @@
 datagen = N2V_DataGenerator()
 imgs = datagen.load_imgs_from_directory(directory = args.dataPath, dims=args.dims, filter=args.fileName)
 
-for i, img in enumerate(imgs):
-    print("img.shape",img.shape)
+files = glob(os.path.join(args.dataPath, args.fileName))
+files.sort()
 
+for i, img in enumerate(imgs):
     img_=img
     if not 'C' in args.dims :
         img_=img[...,0]
@@ -59,17 +61,16 @@
         for j in range(img_.shape[0]):
-            print("img_[j].shape", img_[j].shape)
             pred[j] = model.predict( img_[j], axes=myDims, n_tiles=tiles)
     else:
         img_=img_[0,...]
-    print("denoising image "+str(i) +" of "+str(len(imgs)))
+    print("denoising image "+str(i+1) +" of "+str(len(imgs)))
 
     # Denoise the image.
     print(args.dims)
     pred = model.predict( img_, axes=args.dims, n_tiles=tiles)
     print(pred.shape)
 
-    filename=args.output
-    if len(imgs) > 1:
-        filename=filename+'_'+str(i).zfill(4) +'.tif'
+    outpath=args.output
+    filename=os.path.basename(files[i]).replace('.tif','_N2V.tif')
+    outpath=os.path.join(outpath,filename)
 
     imwrite(filename,pred.astype(np.float32))
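
Note: the change above derives each output filename from its input file: <name>.tif becomes <name>_N2V.tif, which is then joined with the --output directory into outpath. A minimal standalone sketch of that construction (the 'data' and 'denoised' directories below are placeholders, not taken from the patch):

import os
from glob import glob

def n2v_output_path(input_file, output_dir='.'):
    # Mirrors the naming used in predictN2V.py: <name>.tif -> <output_dir>/<name>_N2V.tif
    filename = os.path.basename(input_file).replace('.tif', '_N2V.tif')
    return os.path.join(output_dir, filename)

# Hypothetical input and output directories.
for f in sorted(glob(os.path.join('data', '*.tif'))):
    print(n2v_output_path(f, 'denoised'))
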
From 0a7a8da9a435ebb4cb292cf7e5abdf83869d604a Mon Sep 17 00:00:00 2001
From: Alexander Krull
Date: Wed, 7 Aug 2019 16:58:38 +0200
Subject: [PATCH 2/5] adding entrypoint for scripts

---
 setup.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/setup.py b/setup.py
index 58f88eb..3140ea8 100755
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,10 @@
         'Programming Language :: Python :: 3.6',
     ],
 
+    scripts=['scripts/scripts/trainN2V.py',
+             'scripts/scripts/predictN2V.py'
+            ]
+
     install_requires=[
         "numpy",
         "scipy",

From 2c897cfcb8888e4759281ab64f410f2684c023bb Mon Sep 17 00:00:00 2001
From: Alexander Krull
Date: Wed, 7 Aug 2019 17:19:45 +0200
Subject: [PATCH 3/5] entrypoint for script is working

---
 scripts/predictN2V.py |   2 +
 scripts/trainN2V.py   | 104 ------------------------------------------
 setup.py              |   6 +--
 3 files changed, 5 insertions(+), 107 deletions(-)
 delete mode 100644 scripts/trainN2V.py

diff --git a/scripts/predictN2V.py b/scripts/predictN2V.py
index cde237b..a4ef5d6 100644
--- a/scripts/predictN2V.py
+++ b/scripts/predictN2V.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import os
 import sys
 import argparse
diff --git a/scripts/trainN2V.py b/scripts/trainN2V.py
deleted file mode 100644
index 8d625c4..0000000
--- a/scripts/trainN2V.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import sys
-import argparse
-
-parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser.add_argument("--baseDir", help="base directory in which your network will live", default='models')
-parser.add_argument("--name", help="name of your network", default='N2V3D')
-parser.add_argument("--dataPath", help="The path to your training data")
-parser.add_argument("--fileName", help="name of your training data file", default="*.tif")
-parser.add_argument("--validationFraction", help="Fraction of data you want to use for validation (percent)", default=10.0, type=float)
-parser.add_argument("--dims", help="dimensions of your data, can include: X,Y,Z,C (channel), T (time)", default='YX')
-parser.add_argument("--patchSizeXY", help="XY-size of your training patches", default=64, type=int)
-parser.add_argument("--patchSizeZ", help="Z-size of your training patches", default=64, type=int)
-parser.add_argument("--epochs", help="number of training epochs", default=100, type=int)
-parser.add_argument("--stepsPerEpoch", help="number training steps per epoch", default=5, type=int)
-parser.add_argument("--batchSize", help="size of your training batches", default=64, type=int)
-parser.add_argument("--netDepth", help="depth of your U-Net", default=2, type=int)
-parser.add_argument("--netKernelSize", help="Size of conv. kernels in first layer", default=3, type=int)
-parser.add_argument("--n2vPercPix", help="percentage of pixels to manipulated by N2V", default=1.6, type=float)
-parser.add_argument("--learningRate", help="initial learning rate", default=0.0004, type=float)
-
-
-if len(sys.argv)==1:
-    parser.print_help(sys.stderr)
-    sys.exit(1)
-
-args = parser.parse_args()
-print(args)
-
-from n2v.models import N2VConfig, N2V
-print('everything imported')
-import numpy as np
-from csbdeep.utils import plot_history
-from n2v.utils.n2v_utils import manipulate_val_data
-from n2v.internals.N2V_DataGenerator import N2V_DataGenerator
-from matplotlib import pyplot as plt
-import urllib
-import os
-import zipfile
-
-from tifffile import imread
-from tifffile import imwrite
-
-
-import glob
-print('everything imported')
-
-
-print("args",str(args.name))
-
-
-
-####################################################
-# PREPARE TRAINING DATA
-####################################################
-
-
-datagen = N2V_DataGenerator()
-imgs = datagen.load_imgs_from_directory(directory = args.dataPath, dims=args.dims, filter=args.fileName)
-print("imgs.shape",imgs[0].shape)
-
-# Here we extract patches for training and validation.
-pshape=( args.patchSizeXY, args.patchSizeXY)
-if 'Z' in args.dims:
-    pshape=(args.patchSizeZ, args.patchSizeXY, args.patchSizeXY)
-
-print(pshape)
-patches = datagen.generate_patches_from_list(imgs[:1], shape=pshape)
-print(patches.shape)
-
-# The patches are non-overlapping, so we can split them into train and validation data.
-frac= int( (len(patches))*float(args.validationFraction)/100.0)
-print("total no. of patches: "+str(len(patches)) + "\ttraining patches: "+str(len(patches)-frac)+"\tvalidation patches: "+str(frac))
-X = patches[frac:]
-X_val = patches[:frac]
-
-
-
-config = N2VConfig(X, unet_kern_size=args.netKernelSize,
-                   train_steps_per_epoch=int(args.stepsPerEpoch),train_epochs=int(args.epochs), train_loss='mse', batch_norm=True,
-                   train_batch_size=args.batchSize, n2v_perc_pix=args.n2vPercPix, n2v_patch_shape=pshape,
-                   n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate=args.learningRate,
-                   unet_n_depth=args.netDepth,
-                   )
-
-# Let's look at the parameters stored in the config-object.
-vars(config)
-
-
-# a name used to identify the model
-model_name = args.name
-# the base directory in which our model will live
-basedir = args.baseDir
-# We are now creating our network model.
-model = N2V(config=config, name=model_name, basedir=basedir)
-
-
-
-####################################################
-# Train Network
-####################################################
-print("begin training")
-history = model.train(X, X_val)
-
diff --git a/setup.py b/setup.py
index 3140ea8..d117c4d 100755
--- a/setup.py
+++ b/setup.py
@@ -37,9 +37,9 @@
         'Programming Language :: Python :: 3.6',
     ],
 
-    scripts=['scripts/scripts/trainN2V.py',
-             'scripts/scripts/predictN2V.py'
-            ]
+    scripts=['scripts/trainN2V.py',
+             'scripts/predictN2V.py'
+            ],
 
     install_requires=[
         "numpy",
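
Note: listing the two scripts under scripts= makes setuptools install them as executables on the user's PATH at install time, which is also why the #!/usr/bin/env python3 shebang is added to predictN2V.py above. A rough sketch of the relevant setup() fragment; apart from the two script paths and the first two install_requires entries, the values are placeholders, not taken from the patches:

from setuptools import setup

setup(
    name='n2v',                         # placeholder metadata for illustration
    version='0.0.0',                    # placeholder
    scripts=['scripts/trainN2V.py',     # copied to the scripts/bin directory on install
             'scripts/predictN2V.py'],
    install_requires=['numpy', 'scipy'],
)
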
From dc9b09ee1b7fa0036703c5d22aff0cc76a615af0 Mon Sep 17 00:00:00 2001
From: Alexander Krull
Date: Wed, 7 Aug 2019 17:23:28 +0200
Subject: [PATCH 4/5] adding missing script

---
 scripts/predictN2V.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/predictN2V.py b/scripts/predictN2V.py
index a4ef5d6..cdc735d 100644
--- a/scripts/predictN2V.py
+++ b/scripts/predictN2V.py
@@ -7,7 +7,7 @@
 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
 parser.add_argument("--baseDir", help="directory in which all your network will live", default='models')
-parser.add_argument("--name", help="name of your network", default='N2V3D')
+parser.add_argument("--name", help="name of your network", default='N2V2D')
 parser.add_argument("--dataPath", help="The path to your data")
 parser.add_argument("--fileName", help="name of your data file", default="*.tif")
 parser.add_argument("--output", help="The path to which your data is to be saved", default='.')

From 771274e6e02c499a588ae24b1fc95ed6f436a6b9 Mon Sep 17 00:00:00 2001
From: Alexander Krull
Date: Wed, 7 Aug 2019 17:32:34 +0200
Subject: [PATCH 5/5] adding missing file

---
 scripts/trainN2V.py | 106 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 scripts/trainN2V.py

diff --git a/scripts/trainN2V.py b/scripts/trainN2V.py
new file mode 100644
index 0000000..6d1b53e
--- /dev/null
+++ b/scripts/trainN2V.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import argparse
+
+parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument("--baseDir", help="base directory in which your network will live", default='models')
+parser.add_argument("--name", help="name of your network", default='N2V3D')
+parser.add_argument("--dataPath", help="The path to your training data")
+parser.add_argument("--fileName", help="name of your training data file", default="*.tif")
+parser.add_argument("--validationFraction", help="Fraction of data you want to use for validation (percent)", default=10.0, type=float)
+parser.add_argument("--dims", help="dimensions of your data, can include: X,Y,Z,C (channel), T (time)", default='YX')
+parser.add_argument("--patchSizeXY", help="XY-size of your training patches", default=64, type=int)
+parser.add_argument("--patchSizeZ", help="Z-size of your training patches", default=64, type=int)
+parser.add_argument("--epochs", help="number of training epochs", default=100, type=int)
+parser.add_argument("--stepsPerEpoch", help="number training steps per epoch", default=5, type=int)
+parser.add_argument("--batchSize", help="size of your training batches", default=64, type=int)
+parser.add_argument("--netDepth", help="depth of your U-Net", default=2, type=int)
+parser.add_argument("--netKernelSize", help="Size of conv. kernels in first layer", default=3, type=int)
+parser.add_argument("--n2vPercPix", help="percentage of pixels to manipulated by N2V", default=1.6, type=float)
+parser.add_argument("--learningRate", help="initial learning rate", default=0.0004, type=float)
+
+
+if len(sys.argv)==1:
+    parser.print_help(sys.stderr)
+    sys.exit(1)
+
+args = parser.parse_args()
+print(args)
+
+from n2v.models import N2VConfig, N2V
+print('everything imported')
+import numpy as np
+from csbdeep.utils import plot_history
+from n2v.utils.n2v_utils import manipulate_val_data
+from n2v.internals.N2V_DataGenerator import N2V_DataGenerator
+from matplotlib import pyplot as plt
+import urllib
+import os
+import zipfile
+
+from tifffile import imread
+from tifffile import imwrite
+
+
+import glob
+print('everything imported')
+
+
+print("args",str(args.name))
+
+
+
+####################################################
+# PREPARE TRAINING DATA
+####################################################
+
+
+datagen = N2V_DataGenerator()
+imgs = datagen.load_imgs_from_directory(directory = args.dataPath, dims=args.dims, filter=args.fileName)
+print("imgs.shape",imgs[0].shape)
+
+# Here we extract patches for training and validation.
+pshape=( args.patchSizeXY, args.patchSizeXY)
+if 'Z' in args.dims:
+    pshape=(args.patchSizeZ, args.patchSizeXY, args.patchSizeXY)
+
+print(pshape)
+patches = datagen.generate_patches_from_list(imgs[:1], shape=pshape)
+print(patches.shape)
+
+# The patches are non-overlapping, so we can split them into train and validation data.
+frac= int( (len(patches))*float(args.validationFraction)/100.0)
+print("total no. of patches: "+str(len(patches)) + "\ttraining patches: "+str(len(patches)-frac)+"\tvalidation patches: "+str(frac))
+X = patches[frac:]
+X_val = patches[:frac]
+
+
+
+config = N2VConfig(X, unet_kern_size=args.netKernelSize,
+                   train_steps_per_epoch=int(args.stepsPerEpoch),train_epochs=int(args.epochs), train_loss='mse', batch_norm=True,
+                   train_batch_size=args.batchSize, n2v_perc_pix=args.n2vPercPix, n2v_patch_shape=pshape,
+                   n2v_manipulator='uniform_withCP', n2v_neighborhood_radius=5, train_learning_rate=args.learningRate,
+                   unet_n_depth=args.netDepth,
+                   )
+
+# Let's look at the parameters stored in the config-object.
+vars(config)
+
+
+# a name used to identify the model
+model_name = args.name
+# the base directory in which our model will live
+basedir = args.baseDir
+# We are now creating our network model.
+model = N2V(config=config, name=model_name, basedir=basedir)
+
+
+
+####################################################
+# Train Network
+####################################################
+print("begin training")
+history = model.train(X, X_val)
+
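
Note: the train/validation split in trainN2V.py above is done on whole patches, not pixels. A standalone sketch of the same arithmetic, with a dummy array standing in for the output of generate_patches_from_list:

import numpy as np

patches = np.zeros((200, 64, 64, 1), dtype=np.float32)  # dummy stand-in for extracted patches

validationFraction = 10.0                              # percent, matching the --validationFraction default
frac = int(len(patches) * validationFraction / 100.0)  # 20 patches reserved for validation

X, X_val = patches[frac:], patches[:frac]              # 180 training / 20 validation patches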