From 343130692f724438ab4368d9d95b9ca4a91d67c9 Mon Sep 17 00:00:00 2001 From: madgagarin Date: Sun, 31 May 2020 17:37:38 +0300 Subject: [PATCH 1/6] Code upgrade for TensorFlow 2.0 --- README.md | 1 + scripts/count_ops.py | 2 +- scripts/evaluate.py | 8 +- scripts/graph_pb2tb.py | 6 +- scripts/label_image.py | 18 ++-- scripts/quantize_graph.py | 9 +- scripts/retrain.py | 179 +++++++++++++++++++------------------- 7 files changed, 112 insertions(+), 111 deletions(-) diff --git a/README.md b/README.md index 37f3ab0aa..ffb21692e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Overview +#"TensorFlow for poets 2" for TensorFlow 2.0 This repo contains code for the "TensorFlow for poets 2" series of codelabs. diff --git a/scripts/count_ops.py b/scripts/count_ops.py index e771f166b..42cd73562 100644 --- a/scripts/count_ops.py +++ b/scripts/count_ops.py @@ -25,7 +25,7 @@ def load_graph(file_name): with open(file_name,'rb') as f: content = f.read() - graph_def = tf.GraphDef() + graph_def = tf.compat.v1.GraphDef() graph_def.ParseFromString(content) with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def, name='') diff --git a/scripts/evaluate.py b/scripts/evaluate.py index 2d2ca3c97..1fa74b096 100644 --- a/scripts/evaluate.py +++ b/scripts/evaluate.py @@ -31,7 +31,7 @@ def evaluate_graph(graph_file_name): with load_graph(graph_file_name).as_default() as graph: - ground_truth_input = tf.placeholder( + ground_truth_input = tf.compat.v1.placeholder( tf.float32, [None, 5], name='GroundTruthInput') image_buffer_input = graph.get_tensor_by_name('input:0') @@ -39,8 +39,8 @@ def evaluate_graph(graph_file_name): accuracy, _ = retrain.add_evaluation_step(final_tensor, ground_truth_input) logits = graph.get_tensor_by_name("final_training_ops/Wx_plus_b/add:0") - xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( - labels = ground_truth_input, + xent = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits( + labels = tf.stop_gradient( ground_truth_input), logits = logits)) image_dir = 'tf_files/flower_photos' @@ -68,7 +68,7 @@ def evaluate_graph(graph_file_name): accuracies = [] xents = [] - with tf.Session(graph=graph) as sess: + with tf.compat.v1.Session(graph=graph) as sess: for filename, ground_truth in zip(filenames, ground_truths): image = Image.open(filename).resize((224,224),Image.ANTIALIAS) image = np.array(image, dtype=np.float32)[None,...] 
diff --git a/scripts/graph_pb2tb.py b/scripts/graph_pb2tb.py index 8e462c313..6c6e55c6e 100644 --- a/scripts/graph_pb2tb.py +++ b/scripts/graph_pb2tb.py @@ -22,7 +22,7 @@ def load_graph(graph_pb_path): with open(graph_pb_path,'rb') as f: content = f.read() - graph_def = tf.GraphDef() + graph_def = tf.compat.v1.GraphDef() graph_def.ParseFromString(content) with tf.Graph().as_default() as graph: tf.import_graph_def(graph_def, name='') @@ -30,8 +30,8 @@ def load_graph(graph_pb_path): def graph_to_tensorboard(graph, out_dir): - with tf.Session(): - train_writer = tf.summary.FileWriter(out_dir) + with tf.compat.v1.Session(): + train_writer = tf.compat.v1.summary.FileWriter(out_dir) train_writer.add_graph(graph) diff --git a/scripts/label_image.py b/scripts/label_image.py index 214c4ece5..2c6d7d28d 100644 --- a/scripts/label_image.py +++ b/scripts/label_image.py @@ -26,7 +26,7 @@ def load_graph(model_file): graph = tf.Graph() - graph_def = tf.GraphDef() + graph_def = tf.compat.v1.GraphDef() with open(model_file, "rb") as f: graph_def.ParseFromString(f.read()) @@ -39,7 +39,7 @@ def read_tensor_from_image_file(file_name, input_height=299, input_width=299, input_mean=0, input_std=255): input_name = "file_reader" output_name = "normalized" - file_reader = tf.read_file(file_name, input_name) + file_reader = tf.io.read_file(file_name, input_name) if file_name.endswith(".png"): image_reader = tf.image.decode_png(file_reader, channels = 3, name='png_reader') @@ -53,16 +53,16 @@ def read_tensor_from_image_file(file_name, input_height=299, input_width=299, name='jpeg_reader') float_caster = tf.cast(image_reader, tf.float32) dims_expander = tf.expand_dims(float_caster, 0); - resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width]) + resized = tf.image.resize(dims_expander, [input_height, input_width], method=tf.image.ResizeMethod.BILINEAR) normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std]) - sess = tf.Session() + sess = tf.compat.v1.Session() result = sess.run(normalized) return result def load_labels(label_file): label = [] - proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines() + proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines() for l in proto_as_ascii_lines: label.append(l.rstrip()) return label @@ -71,11 +71,11 @@ def load_labels(label_file): file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg" model_file = "tf_files/retrained_graph.pb" label_file = "tf_files/retrained_labels.txt" - input_height = 224 - input_width = 224 + input_height = 299 + input_width = 299 input_mean = 128 input_std = 128 - input_layer = "input" + input_layer = "Mul" output_layer = "final_result" parser = argparse.ArgumentParser() @@ -121,7 +121,7 @@ def load_labels(label_file): input_operation = graph.get_operation_by_name(input_name); output_operation = graph.get_operation_by_name(output_name); - with tf.Session(graph=graph) as sess: + with tf.compat.v1.Session(graph=graph) as sess: start = time.time() results = sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t}) diff --git a/scripts/quantize_graph.py b/scripts/quantize_graph.py index 2dcc12ad7..9da356e31 100644 --- a/scripts/quantize_graph.py +++ b/scripts/quantize_graph.py @@ -31,6 +31,7 @@ import collections import re import numpy as np +import tensorflow as tf from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 @@ -584,7 +585,7 @@ def eightbitize_nodes_recursively(self, current_node): quantize_input = False if current_node.op 
in ("MatMul", "Conv2D", "BiasAdd", "MaxPool", "AvgPool", "Relu", "Relu6", - "BatchNormWithGlobalNormalization"): + tf.nn.batch_normalization()): quantize_input = True elif current_node.op == "Concat" and i > 0: quantize_input = ( @@ -616,7 +617,7 @@ def eightbitize_nodes_recursively(self, current_node): elif (current_node.op == "Concat" and dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32): self.eightbitize_concat_node(current_node) - elif current_node.op == "BatchNormWithGlobalNormalization": + elif current_node.op == tf.nn.batch_normalization(): self.eightbitize_batch_norm_node(current_node) elif (current_node.op == "Reshape" and dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32): @@ -1044,7 +1045,7 @@ def eightbitize_batch_norm_node(self, original_node): self.eightbitize_input_to_node(namespace_prefix, original_gamma_name, reshape_dims_name, reduction_dims_name)) quantized_batch_norm_node = create_node( - "QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name, + tf.nn.batch_normalization(), quantized_batch_norm_name, [ quantize_input_name, min_input_name, max_input_name, quantize_mean_name, min_mean_name, max_mean_name, @@ -1187,7 +1188,7 @@ def apply_final_node_renames(self): def remove_dead_nodes(self, output_names): """Removes nodes that are no longer needed for inference from the graph.""" old_output_graph = self.output_graph - self.output_graph = graph_util.extract_sub_graph(old_output_graph, + self.output_graph = tf.compat.v1.graph_util.extract_sub_graph(old_output_graph, output_names) def quantize_weights(self, input_graph, quantization_mode): diff --git a/scripts/retrain.py b/scripts/retrain.py index 5fa9b0f13..23dc969e0 100644 --- a/scripts/retrain.py +++ b/scripts/retrain.py @@ -108,7 +108,6 @@ from six.moves import urllib import tensorflow as tf -from tensorflow.python.framework import graph_util from tensorflow.python.framework import tensor_shape from tensorflow.python.platform import gfile from tensorflow.python.util import compat @@ -139,7 +138,7 @@ def create_image_lists(image_dir, testing_percentage, validation_percentage): into training, testing, and validation sets within each label. """ if not gfile.Exists(image_dir): - tf.logging.error("Image directory '" + image_dir + "' not found.") + tf.compat.v1.logging.error("Image directory '" + image_dir + "' not found.") return None result = collections.OrderedDict() sub_dirs = [ @@ -148,23 +147,23 @@ def create_image_lists(image_dir, testing_percentage, validation_percentage): sub_dirs = sorted(item for item in sub_dirs if gfile.IsDirectory(item)) for sub_dir in sub_dirs: - extensions = ['jpg', 'jpeg', 'JPG', 'JPEG'] + extensions = ['jpg', 'jpeg', 'JPG', 'JPEG', 'png', 'PNG'] file_list = [] dir_name = os.path.basename(sub_dir) if dir_name == image_dir: continue - tf.logging.info("Looking for images in '" + dir_name + "'") + tf.compat.v1.logging.info("Looking for images in '" + dir_name + "'") for extension in extensions: file_glob = os.path.join(image_dir, dir_name, '*.' + extension) file_list.extend(gfile.Glob(file_glob)) if not file_list: - tf.logging.warning('No files found') + tf.compat.v1.logging.warning('No files found') continue if len(file_list) < 20: - tf.logging.warning( + tf.compat.v1.logging.warning( 'WARNING: Folder has less than 20 images, which may cause issues.') elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS: - tf.logging.warning( + tf.compat.v1.logging.warning( 'WARNING: Folder {} has more than {} images. 
Some images will ' 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS)) label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower()) @@ -223,13 +222,13 @@ def get_image_path(image_lists, label_name, index, image_dir, category): """ if label_name not in image_lists: - tf.logging.fatal('Label does not exist %s.', label_name) + tf.compat.v1.logging.fatal('Label does not exist %s.', label_name) label_lists = image_lists[label_name] if category not in label_lists: - tf.logging.fatal('Category does not exist %s.', category) + tf.compat.v1.logging.fatal('Category does not exist %s.', category) category_list = label_lists[category] if not category_list: - tf.logging.fatal('Label %s has no images in the category %s.', + tf.compat.v1.logging.fatal('Label %s has no images in the category %s.', label_name, category) mod_index = index % len(category_list) base_name = category_list[mod_index] @@ -271,8 +270,8 @@ def create_model_graph(model_info): """ with tf.Graph().as_default() as graph: model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name']) - with gfile.FastGFile(model_path, 'rb') as f: - graph_def = tf.GraphDef() + with gfile.GFile(model_path, 'rb') as f: + graph_def = tf.compat.v1.GraphDef() graph_def.ParseFromString(f.read()) bottleneck_tensor, resized_input_tensor = (tf.import_graph_def( graph_def, @@ -335,7 +334,7 @@ def _progress(count, block_size, total_size): filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress) print() statinfo = os.stat(filepath) - tf.logging.info('Successfully downloaded', filename, statinfo.st_size, + tf.compat.v1.logging.info('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory) @@ -358,12 +357,12 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Create a single bottleneck file.""" - tf.logging.info('Creating bottleneck at ' + bottleneck_path) + tf.compat.v1.logging.info('Creating bottleneck at ' + bottleneck_path) image_path = get_image_path(image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): - tf.logging.fatal('File does not exist %s', image_path) - image_data = gfile.FastGFile(image_path, 'rb').read() + tf.compat.v1.logging.fatal('File does not exist %s', image_path) + image_data = tf.gfile.GFile(image_path, 'rb').read() try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, @@ -422,7 +421,7 @@ def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir, try: bottleneck_values = [float(x) for x in bottleneck_string.split(',')] except ValueError: - tf.logging.warning('Invalid float found, recreating bottleneck') + tf.compat.v1.logging.warning('Invalid float found, recreating bottleneck') did_hit_error = True if did_hit_error: create_bottleneck_file(bottleneck_path, image_lists, label_name, index, @@ -477,7 +476,7 @@ def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir, how_many_bottlenecks += 1 if how_many_bottlenecks % 100 == 0: - tf.logging.info( + tf.compat.v1.logging.info( str(how_many_bottlenecks) + ' bottleneck files created.') @@ -588,7 +587,7 @@ def get_random_distorted_bottlenecks( image_path = get_image_path(image_lists, label_name, image_index, image_dir, category) if not gfile.Exists(image_path): - tf.logging.fatal('File does not exist %s', image_path) + tf.compat.v1.logging.fatal('File does not exist %s', image_path) jpeg_data = 
gfile.FastGFile(image_path, 'rb').read() # Note that we materialize the distorted_image_data as a numpy array before # sending running inference on the image. This involves 2 memory copies and @@ -683,14 +682,14 @@ def add_input_distortions(flip_left_right, random_crop, random_scale, The jpeg input layer and the distorted result tensor. """ - jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput') + jpeg_data = tf.compat.v1.placeholder(tf.string, name='DistortJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) margin_scale = 1.0 + (random_crop / 100.0) resize_scale = 1.0 + (random_scale / 100.0) margin_scale_value = tf.constant(margin_scale) - resize_scale_value = tf.random_uniform(tensor_shape.scalar(), + resize_scale_value = tf.random.uniform(tensor_shape.scalar(), minval=1.0, maxval=resize_scale) scale_value = tf.multiply(margin_scale_value, resize_scale_value) @@ -698,10 +697,10 @@ def add_input_distortions(flip_left_right, random_crop, random_scale, precrop_height = tf.multiply(scale_value, input_height) precrop_shape = tf.stack([precrop_height, precrop_width]) precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32) - precropped_image = tf.image.resize_bilinear(decoded_image_4d, - precrop_shape_as_int) - precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0]) - cropped_image = tf.random_crop(precropped_image_3d, + precropped_image = tf.image.resize(decoded_image_4d, + precrop_shape_as_int, method=tf.image.ResizeMethod.BILINEAR) + precropped_image_3d = tf.squeeze(precropped_image, axis=[0]) + cropped_image = tf.image.random_crop(precropped_image_3d, [input_height, input_width, input_depth]) if flip_left_right: flipped_image = tf.image.random_flip_left_right(cropped_image) @@ -709,7 +708,7 @@ def add_input_distortions(flip_left_right, random_crop, random_scale, flipped_image = cropped_image brightness_min = 1.0 - (random_brightness / 100.0) brightness_max = 1.0 + (random_brightness / 100.0) - brightness_value = tf.random_uniform(tensor_shape.scalar(), + brightness_value = tf.random.uniform(tensor_shape.scalar(), minval=brightness_min, maxval=brightness_max) brightened_image = tf.multiply(flipped_image, brightness_value) @@ -721,15 +720,15 @@ def add_input_distortions(flip_left_right, random_crop, random_scale, def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" - with tf.name_scope('summaries'): - mean = tf.reduce_mean(var) - tf.summary.scalar('mean', mean) - with tf.name_scope('stddev'): - stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) - tf.summary.scalar('stddev', stddev) - tf.summary.scalar('max', tf.reduce_max(var)) - tf.summary.scalar('min', tf.reduce_min(var)) - tf.summary.histogram('histogram', var) + with tf.compat.v1.name_scope('summaries'): + mean = tf.reduce_mean(input_tensor=var) + tf.compat.v1.summary.scalar('mean', mean) + with tf.compat.v1.name_scope('stddev'): + stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var - mean))) + tf.compat.v1.summary.scalar('stddev', stddev) + tf.compat.v1.summary.scalar('max', tf.reduce_max(input_tensor=var)) + tf.compat.v1.summary.scalar('min', tf.reduce_min(input_tensor=var)) + tf.compat.v1.summary.histogram('histogram', var) def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor, @@ -754,46 +753,46 @@ def add_final_training_ops(class_count, final_tensor_name, 
bottleneck_tensor, The tensors for the training and cross entropy results, and tensors for the bottleneck input and ground truth input. """ - with tf.name_scope('input'): - bottleneck_input = tf.placeholder_with_default( + with tf.compat.v1.name_scope('input'): + bottleneck_input = tf.compat.v1.placeholder_with_default( bottleneck_tensor, shape=[None, bottleneck_tensor_size], name='BottleneckInputPlaceholder') - ground_truth_input = tf.placeholder(tf.float32, + ground_truth_input = tf.compat.v1.placeholder(tf.float32, [None, class_count], name='GroundTruthInput') # Organizing the following ops as `final_training_ops` so they're easier # to see in TensorBoard layer_name = 'final_training_ops' - with tf.name_scope(layer_name): - with tf.name_scope('weights'): - initial_value = tf.truncated_normal( + with tf.compat.v1.name_scope(layer_name): + with tf.compat.v1.name_scope('weights'): + initial_value = tf.random.truncated_normal( [bottleneck_tensor_size, class_count], stddev=0.001) layer_weights = tf.Variable(initial_value, name='final_weights') variable_summaries(layer_weights) - with tf.name_scope('biases'): + with tf.compat.v1.name_scope('biases'): layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases') variable_summaries(layer_biases) - with tf.name_scope('Wx_plus_b'): + with tf.compat.v1.name_scope('Wx_plus_b'): logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases - tf.summary.histogram('pre_activations', logits) + tf.compat.v1.summary.histogram('pre_activations', logits) final_tensor = tf.nn.softmax(logits, name=final_tensor_name) - tf.summary.histogram('activations', final_tensor) + tf.compat.v1.summary.histogram('activations', final_tensor) - with tf.name_scope('cross_entropy'): + with tf.compat.v1.name_scope('cross_entropy'): cross_entropy = tf.nn.softmax_cross_entropy_with_logits( - labels=ground_truth_input, logits=logits) - with tf.name_scope('total'): - cross_entropy_mean = tf.reduce_mean(cross_entropy) - tf.summary.scalar('cross_entropy', cross_entropy_mean) + labels=tf.stop_gradient(ground_truth_input), logits=logits) + with tf.compat.v1.name_scope('total'): + cross_entropy_mean = tf.reduce_mean(input_tensor=cross_entropy) + tf.compat.v1.summary.scalar('cross_entropy', cross_entropy_mean) - with tf.name_scope('train'): - optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate) + with tf.compat.v1.name_scope('train'): + optimizer = tf.compat.v1.train.GradientDescentOptimizer(FLAGS.learning_rate) train_step = optimizer.minimize(cross_entropy_mean) return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input, @@ -811,30 +810,30 @@ def add_evaluation_step(result_tensor, ground_truth_tensor): Returns: Tuple of (evaluation step, prediction). 
""" - with tf.name_scope('accuracy'): - with tf.name_scope('correct_prediction'): - prediction = tf.argmax(result_tensor, 1) + with tf.compat.v1.name_scope('accuracy'): + with tf.compat.v1.name_scope('correct_prediction'): + prediction = tf.argmax(input=result_tensor, axis=1) correct_prediction = tf.equal( - prediction, tf.argmax(ground_truth_tensor, 1)) - with tf.name_scope('accuracy'): - evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) - tf.summary.scalar('accuracy', evaluation_step) + prediction, tf.argmax(input=ground_truth_tensor, axis=1)) + with tf.compat.v1.name_scope('accuracy'): + evaluation_step = tf.reduce_mean(input_tensor=tf.cast(correct_prediction, tf.float32)) + tf.compat.v1.summary.scalar('accuracy', evaluation_step) return evaluation_step, prediction def save_graph_to_file(sess, graph, graph_file_name): - output_graph_def = graph_util.convert_variables_to_constants( + output_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), [FLAGS.final_tensor_name]) - with gfile.FastGFile(graph_file_name, 'wb') as f: + with gfile.GFile(graph_file_name, 'wb') as f: f.write(output_graph_def.SerializeToString()) return def prepare_file_system(): # Setup the directory we'll write summaries to for TensorBoard - if tf.gfile.Exists(FLAGS.summaries_dir): - tf.gfile.DeleteRecursively(FLAGS.summaries_dir) - tf.gfile.MakeDirs(FLAGS.summaries_dir) + if tf.io.gfile.exists(FLAGS.summaries_dir): + tf.io.gfile.rmtree(FLAGS.summaries_dir) + tf.io.gfile.makedirs(FLAGS.summaries_dir) if FLAGS.intermediate_store_frequency > 0: ensure_dir_exists(FLAGS.intermediate_output_graphs_dir) return @@ -874,13 +873,13 @@ def create_model_info(architecture): elif architecture.startswith('mobilenet_'): parts = architecture.split('_') if len(parts) != 3 and len(parts) != 4: - tf.logging.error("Couldn't understand architecture name '%s'", + tf.compat.v1.logging.error("Couldn't understand architecture name '%s'", architecture) return None version_string = parts[1] if (version_string != '1.0' and version_string != '0.75' and version_string != '0.50' and version_string != '0.25'): - tf.logging.error( + tf.compat.v1.logging.error( """"The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25', but found '%s' for architecture '%s'""", version_string, architecture) @@ -888,7 +887,7 @@ def create_model_info(architecture): size_string = parts[2] if (size_string != '224' and size_string != '192' and size_string != '160' and size_string != '128'): - tf.logging.error( + tf.compat.v1.logging.error( """The Mobilenet input size should be '224', '192', '160', or '128', but found '%s' for architecture '%s'""", size_string, architecture) @@ -897,7 +896,7 @@ def create_model_info(architecture): is_quantized = False else: if parts[3] != 'quantized': - tf.logging.error( + tf.compat.v1.logging.error( "Couldn't understand architecture suffix '%s' for '%s'", parts[3], architecture) return None @@ -919,7 +918,7 @@ def create_model_info(architecture): input_mean = 127.5 input_std = 127.5 else: - tf.logging.error("Couldn't understand architecture name '%s'", architecture) + tf.compat.v1.logging.error("Couldn't understand architecture name '%s'", architecture) raise ValueError('Unknown architecture', architecture) return { @@ -951,14 +950,14 @@ def add_jpeg_decoding(input_width, input_height, input_depth, input_mean, Tensors for the node to feed JPEG data into, and the output of the preprocessing steps. 
""" - jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput') + jpeg_data = tf.compat.v1.placeholder(tf.string, name='DecodeJPGInput') decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth) decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32) decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0) resize_shape = tf.stack([input_height, input_width]) resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32) - resized_image = tf.image.resize_bilinear(decoded_image_4d, - resize_shape_as_int) + resized_image = tf.image.resize(decoded_image_4d, + resize_shape_as_int, method=tf.image.ResizeMethod.BILINEAR) offset_image = tf.subtract(resized_image, input_mean) mul_image = tf.multiply(offset_image, 1.0 / input_std) return jpeg_data, mul_image @@ -967,7 +966,7 @@ def add_jpeg_decoding(input_width, input_height, input_depth, input_mean, def main(_): # Needed to make sure the logging output is visible. # See https://github.com/tensorflow/tensorflow/issues/3047 - tf.logging.set_verbosity(tf.logging.INFO) + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) # Prepare necessary directories that can be used during training prepare_file_system() @@ -975,7 +974,7 @@ def main(_): # Gather information about the model architecture we'll be using. model_info = create_model_info(FLAGS.architecture) if not model_info: - tf.logging.error('Did not recognize architecture flag') + tf.compat.v1.logging.error('Did not recognize architecture flag') return -1 # Set up the pre-trained graph. @@ -988,10 +987,10 @@ def main(_): FLAGS.validation_percentage) class_count = len(image_lists.keys()) if class_count == 0: - tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir) + tf.compat.v1.logging.error('No valid folders of images found at ' + FLAGS.image_dir) return -1 if class_count == 1: - tf.logging.error('Only one valid folder of images found at ' + + tf.compat.v1.logging.error('Only one valid folder of images found at ' + FLAGS.image_dir + ' - multiple classes are needed for classification.') return -1 @@ -1001,7 +1000,7 @@ def main(_): FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale, FLAGS.random_brightness) - with tf.Session(graph=graph) as sess: + with tf.compat.v1.Session(graph=graph) as sess: # Set up the image decoding sub-graph. jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding( model_info['input_width'], model_info['input_height'], @@ -1035,15 +1034,15 @@ def main(_): final_tensor, ground_truth_input) # Merge all the summaries and write them out to the summaries_dir - merged = tf.summary.merge_all() - train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', + merged = tf.compat.v1.summary.merge_all() + train_writer = tf.compat.v1.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph) - validation_writer = tf.summary.FileWriter( + validation_writer = tf.compat.v1.summary.FileWriter( FLAGS.summaries_dir + '/validation') # Set up all our weights to their initial default values. - init = tf.global_variables_initializer() + init = tf.compat.v1.global_variables_initializer() sess.run(init) # Run the training for as many cycles as requested on the command line. 
@@ -1078,9 +1077,9 @@ def main(_): [evaluation_step, cross_entropy], feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth}) - tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' % + tf.compat.v1.logging.info('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i, train_accuracy * 100)) - tf.logging.info('%s: Step %d: Cross entropy = %f' % + tf.compat.v1.logging.info('%s: Step %d: Cross entropy = %f' % (datetime.now(), i, cross_entropy_value)) validation_bottlenecks, validation_ground_truth, _ = ( get_random_cached_bottlenecks( @@ -1095,7 +1094,7 @@ def main(_): feed_dict={bottleneck_input: validation_bottlenecks, ground_truth_input: validation_ground_truth}) validation_writer.add_summary(validation_summary, i) - tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' % + tf.compat.v1.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' % (datetime.now(), i, validation_accuracy * 100, len(validation_bottlenecks))) @@ -1106,7 +1105,7 @@ def main(_): and i > 0): intermediate_file_name = (FLAGS.intermediate_output_graphs_dir + 'intermediate_' + str(i) + '.pb') - tf.logging.info('Save intermediate result to : ' + + tf.compat.v1.logging.info('Save intermediate result to : ' + intermediate_file_name) save_graph_to_file(sess, graph, intermediate_file_name) @@ -1122,21 +1121,21 @@ def main(_): [evaluation_step, prediction], feed_dict={bottleneck_input: test_bottlenecks, ground_truth_input: test_ground_truth}) - tf.logging.info('Final test accuracy = %.1f%% (N=%d)' % + tf.compat.v1.logging.info('Final test accuracy = %.1f%% (N=%d)' % (test_accuracy * 100, len(test_bottlenecks))) if FLAGS.print_misclassified_test_images: - tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===') + tf.compat.v1.logging.info('=== MISCLASSIFIED TEST IMAGES ===') for i, test_filename in enumerate(test_filenames): if predictions[i] != test_ground_truth[i].argmax(): - tf.logging.info('%70s %s' % + tf.compat.v1.logging.info('%70s %s' % (test_filename, list(image_lists.keys())[predictions[i]])) # Write out the trained graph and labels with the weights stored as # constants. save_graph_to_file(sess, graph, FLAGS.output_graph) - with gfile.FastGFile(FLAGS.output_labels, 'w') as f: + with gfile.GFile(FLAGS.output_labels, 'w') as f: f.write('\n'.join(image_lists.keys()) + '\n') @@ -1323,4 +1322,4 @@ def main(_): for more information on Mobilenet.\ """) FLAGS, unparsed = parser.parse_known_args() - tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) + tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed) From d4c0864ecd656523fd7f568df1e8142358d22837 Mon Sep 17 00:00:00 2001 From: Duane Gran Date: Wed, 13 Jan 2021 09:09:39 -0500 Subject: [PATCH 2/6] Update label_image.py Running this with TensorFlow 2.4 on a Raspberry Pi I was getting a "RuntimeError: The Session graph is empty" notice and the label operation wouldn't work. I tried various things to fix it, but the only thing that seemed to work was disabling eager execution. I'm not an expert with TensorFlow so I'm open to other solutions if this is a problem. Just sharing that in my case it resolved the problem and allowed image classification to work again. 
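For reference, the workaround looks roughly like this in isolation. This is a minimal sketch, not the script itself: the graph path and the "input"/"final_result" op names are assumptions that depend on how the model was retrained.

    import numpy as np
    import tensorflow as tf

    # The v1 Session API only works in TF 2.x when eager execution is off;
    # without this call sess.run() raises "RuntimeError: The Session graph is empty."
    tf.compat.v1.disable_eager_execution()

    # Load a frozen GraphDef the same way load_graph() does.
    graph = tf.Graph()
    graph_def = tf.compat.v1.GraphDef()
    with open("tf_files/retrained_graph.pb", "rb") as f:   # assumed path
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")

    with tf.compat.v1.Session(graph=graph) as sess:
        input_op = graph.get_operation_by_name("input")          # assumed input op name
        output_op = graph.get_operation_by_name("final_result")  # assumed output op name
        image = np.zeros((1, 224, 224, 3), dtype=np.float32)     # stand-in for a decoded image
        results = sess.run(output_op.outputs[0], {input_op.outputs[0]: image})

The one-line change below applies the same disable_eager_execution() call right after the TensorFlow import in label_image.py.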
--- scripts/label_image.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/label_image.py b/scripts/label_image.py index 2c6d7d28d..87073eca7 100644 --- a/scripts/label_image.py +++ b/scripts/label_image.py @@ -23,6 +23,7 @@ import numpy as np import tensorflow as tf +tf.compat.v1.disable_eager_execution() def load_graph(model_file): graph = tf.Graph() From 38bf691175ccafb97855329a0d5987581befb4e2 Mon Sep 17 00:00:00 2001 From: madgagarin <65716701+madgagarin@users.noreply.github.com> Date: Thu, 22 Apr 2021 19:04:53 +0300 Subject: [PATCH 3/6] Update retrain.py --- scripts/retrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/retrain.py b/scripts/retrain.py index 23dc969e0..1a4a6ff83 100644 --- a/scripts/retrain.py +++ b/scripts/retrain.py @@ -362,7 +362,7 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): tf.compat.v1.logging.fatal('File does not exist %s', image_path) - image_data = tf.gfile.GFile(image_path, 'rb').read() + image_data = tf.io.gfile.exists(image_path, 'rb').read() try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, From 4d4853ed5987532584839ac98f761c6acac1de30 Mon Sep 17 00:00:00 2001 From: madgagarin Date: Thu, 22 Apr 2021 19:18:21 +0300 Subject: [PATCH 4/6] up to 2.4 --- scripts/retrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/retrain.py b/scripts/retrain.py index 1a4a6ff83..87efc3325 100644 --- a/scripts/retrain.py +++ b/scripts/retrain.py @@ -362,7 +362,7 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): tf.compat.v1.logging.fatal('File does not exist %s', image_path) - image_data = tf.io.gfile.exists(image_path, 'rb').read() + image_data = tf.io.gfile.exists(image_path).read() try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, From cfe56fd73a9a2f18720101792f260a32f175da93 Mon Sep 17 00:00:00 2001 From: madgagarin Date: Thu, 22 Apr 2021 19:22:10 +0300 Subject: [PATCH 5/6] up to 2.4 --- scripts/retrain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/retrain.py b/scripts/retrain.py index 87efc3325..13f3705d8 100644 --- a/scripts/retrain.py +++ b/scripts/retrain.py @@ -362,7 +362,7 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): tf.compat.v1.logging.fatal('File does not exist %s', image_path) - image_data = tf.io.gfile.exists(image_path).read() + image_data = tf.io.gfile.exists(image_path) try: bottleneck_values = run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, From 2f22f32ed2f52f1b8881034588bac4b6c5ae00a3 Mon Sep 17 00:00:00 2001 From: madgagarin Date: Thu, 22 Apr 2021 19:49:05 +0300 Subject: [PATCH 6/6] up to 2.4 --- scripts/retrain.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/retrain.py b/scripts/retrain.py index 13f3705d8..7599f0065 100644 --- a/scripts/retrain.py +++ b/scripts/retrain.py @@ -362,14 +362,13 @@ def create_bottleneck_file(bottleneck_path, image_lists, label_name, index, image_dir, category) if not gfile.Exists(image_path): tf.compat.v1.logging.fatal('File does not exist %s', image_path) - image_data = tf.io.gfile.exists(image_path) + image_data = gfile.FastGFile(image_path, 'rb').read() try: bottleneck_values = 
run_bottleneck_on_image( sess, image_data, jpeg_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor) except Exception as e: - raise RuntimeError('Error during processing file %s (%s)' % (image_path, - str(e))) + raise RuntimeError('Error during processing file %s (%s)' % (image_path, str(e))) bottleneck_string = ','.join(str(x) for x in bottleneck_values) with open(bottleneck_path, 'w') as bottleneck_file: bottleneck_file.write(bottleneck_string) @@ -985,6 +984,7 @@ def main(_): # Look at the folder structure, and create lists of all the images. image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage, FLAGS.validation_percentage) + print(image_lists) class_count = len(image_lists.keys()) if class_count == 0: tf.compat.v1.logging.error('No valid folders of images found at ' + FLAGS.image_dir)