Did new training with Confusion matrix at each step
nlght committed May 9, 2019
1 parent cd1aa1d commit 5263b8f
Showing 6 changed files with 238 additions and 4 deletions.
Binary file added Matrix.png
93 changes: 89 additions & 4 deletions all_preprocessing_done.py
@@ -5,13 +5,17 @@
from keras.utils import to_categorical
from keras import backend as K
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import keras.callbacks as kcb
from SPECtogram import gimmeDaSPECtogram
from keras_visual_callbacks import ConfusionMatrixPlotter

# Second dimension of the feature is dim2
feature_dim_2 = 12

# Save data to array file first
-save_data_to_array(max_len=feature_dim_2)
#save_data_to_array(max_len=feature_dim_2)

# # Loading train set and test set
X_train, X_test, y_train, y_test = get_train_test()
@@ -23,7 +27,7 @@
batch_size = 100
verbose = 1
labels_local, _, _ = get_labels()
-num_classes = len(labels_local)
num_classes = 10

# Reshaping to perform 2D convolution
X_train = X_train.reshape(X_train.shape[0], feature_dim_1, feature_dim_2, channel)
@@ -63,7 +67,15 @@ def predict(filepath, model):


model = get_model()
-model.fit(X_train, y_train_hot, batch_size=batch_size, epochs=epochs, verbose=verbose, validation_data=(X_test, y_test_hot))
reduce_lr = kcb.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
modelCheckpoint = kcb.ModelCheckpoint("./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5", monitor='val_loss', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=1)
confusionMatrixPlotter = ConfusionMatrixPlotter(X_val=X_test, Y_val=y_test_hot, classes=labels_local)
model.fit(X_train, y_train_hot, batch_size=batch_size, epochs=epochs, verbose=verbose, validation_data=(X_test, y_test_hot),
callbacks=[
modelCheckpoint,
reduce_lr,
confusionMatrixPlotter
])

# serialize model to JSON
model_json = model.to_json()
@@ -113,4 +125,77 @@ def freeze_session(session, keep_var_names=None, output_names=None, clear_device

frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.outputs])

-tf.train.write_graph(frozen_graph, "/home/night/PycharmProjects/APMiniProject/", "my_model_4.pb", as_text=False)
tf.train.write_graph(frozen_graph, "/home/night/PycharmProjects/APMiniProject/", "my_model_4.pb", as_text=False)








def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'

# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
    classes = np.asarray(classes)[np.unique(np.concatenate((y_true, y_pred)))]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')

print(cm)

fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')

# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")

# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax


np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
y_pred = np.argmax(model.predict(X_test), axis=1)
plot_confusion_matrix(y_test, y_pred, classes=labels_local,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=labels_local, normalize=True,
title='Normalized confusion matrix')

plt.show()
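
As an aside on the checkpointing added above: because ModelCheckpoint is configured with save_best_only=False and save_weights_only=False, every epoch leaves a full model under ./checkpoints/. A minimal sketch of reloading one of them (the file name below is hypothetical; use whatever weights.{epoch:02d}-{val_loss:.2f}.hdf5 file the run actually produced):

# Sketch: reload a full model written by the ModelCheckpoint callback above.
# "weights.10-0.35.hdf5" is a made-up example name; list ./checkpoints/ to pick a real one.
from keras.models import load_model

restored = load_model("./checkpoints/weights.10-0.35.hdf5")  # architecture + weights + optimizer state
restored.summary()
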
148 changes: 148 additions & 0 deletions keras_visual_callbacks.py
@@ -0,0 +1,148 @@
from keras.callbacks import Callback
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.metrics import confusion_matrix
import itertools
import numpy as np


class AccLossPlotter(Callback):
"""Plot training Accuracy and Loss values on a Matplotlib graph.
The graph is updated by the 'on_epoch_end' event of the Keras Callback class
# Arguments
graphs: list with some or all of ('acc', 'loss')
save_graph: Save graph as an image on Keras Callback 'on_train_end' event
"""

def __init__(self, graphs=['acc', 'loss'], save_graph=False):
self.graphs = graphs
self.num_subplots = len(graphs)
self.save_graph = save_graph


def on_train_begin(self, logs={}):
self.acc = []
self.val_acc = []
self.loss = []
self.val_loss = []
self.epoch_count = 0
plt.ion()
plt.show()


def on_epoch_end(self, epoch, logs={}):
self.epoch_count += 1
self.val_acc.append(logs.get('val_acc'))
self.acc.append(logs.get('acc'))
self.loss.append(logs.get('loss'))
self.val_loss.append(logs.get('val_loss'))
epochs = [x for x in range(self.epoch_count)]

count_subplots = 0

if 'acc' in self.graphs:
count_subplots += 1
plt.subplot(self.num_subplots, 1, count_subplots)
plt.title('Accuracy')
#plt.axis([0,100,0,1])
plt.plot(epochs, self.val_acc, color='r')
plt.plot(epochs, self.acc, color='b')
plt.ylabel('accuracy')

red_patch = mpatches.Patch(color='red', label='Test')
blue_patch = mpatches.Patch(color='blue', label='Train')

plt.legend(handles=[red_patch, blue_patch], loc=4)

if 'loss' in self.graphs:
count_subplots += 1
plt.subplot(self.num_subplots, 1, count_subplots)
plt.title('Loss')
#plt.axis([0,100,0,5])
plt.plot(epochs, self.val_loss, color='r')
plt.plot(epochs, self.loss, color='b')
plt.ylabel('loss')

red_patch = mpatches.Patch(color='red', label='Test')
blue_patch = mpatches.Patch(color='blue', label='Train')

plt.legend(handles=[red_patch, blue_patch], loc=4)

plt.draw()
plt.pause(0.001)

def on_train_end(self, logs={}):
if self.save_graph:
plt.savefig('training_acc_loss.png')

class ConfusionMatrixPlotter(Callback):
"""Plot the confusion matrix on a graph and update after each epoch
# Arguments
X_val: The input values
Y_val: The expected output values
classes: The categories as a list of string names
normalize: True - normalize to [0,1], False - keep as is
cmap: Specify matplotlib colour map
title: Graph Title
"""
def __init__(self, X_val, Y_val, classes, normalize=False, cmap=plt.cm.Blues, title='Confusion Matrix'):
self.X_val = X_val
self.Y_val = Y_val
self.title = title
self.classes = classes
self.normalize = normalize
self.cmap = cmap
plt.ion()
#plt.show()
#plt.figure()

plt.title(self.title)



def on_train_begin(self, logs={}):
pass


def on_epoch_end(self, epoch, logs={}):
plt.clf()
pred = self.model.predict(self.X_val)
max_pred = np.argmax(pred, axis=1)
max_y = np.argmax(self.Y_val, axis=1)
cnf_mat = confusion_matrix(max_y, max_pred)

if self.normalize:
cnf_mat = cnf_mat.astype('float') / cnf_mat.sum(axis=1)[:, np.newaxis]

thresh = cnf_mat.max() / 2.
for i, j in itertools.product(range(cnf_mat.shape[0]), range(cnf_mat.shape[1])):
plt.text(j, i, cnf_mat[i, j],
horizontalalignment="center",
color="white" if cnf_mat[i, j] > thresh else "black")

plt.imshow(cnf_mat, interpolation='nearest', cmap=self.cmap)

# Labels
tick_marks = np.arange(len(self.classes))
plt.xticks(tick_marks, self.classes, rotation=45)
plt.yticks(tick_marks, self.classes)

plt.colorbar()

plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
#plt.draw()
plt.savefig("Matrix.png", bbox_inches='tight')
plt.pause(0.001)






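
For context, a minimal sketch of how the two callbacks defined in this file could both be attached to a training run (only ConfusionMatrixPlotter is actually wired up in all_preprocessing_done.py in this commit; the model, data names, and hyper-parameters below are placeholders, and AccLossPlotter expects the model to be compiled with metrics=['accuracy'] so that 'acc'/'val_acc' appear in the logs):

# Sketch only: attaching both callbacks to model.fit; model/X_train/etc. are placeholders.
from keras_visual_callbacks import AccLossPlotter, ConfusionMatrixPlotter

acc_loss = AccLossPlotter(graphs=['acc', 'loss'], save_graph=True)       # writes training_acc_loss.png on train end
cm_plot = ConfusionMatrixPlotter(X_val=X_test, Y_val=y_test_hot,
                                 classes=labels_local, normalize=True)   # redraws Matrix.png after each epoch
model.fit(X_train, y_train_hot,
          batch_size=100, epochs=50,
          validation_data=(X_test, y_test_hot),
          callbacks=[acc_loss, cm_plot])
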
Binary file added model_4.h5
Binary file not shown.
1 change: 1 addition & 0 deletions model_4.json
@@ -0,0 +1 @@
{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Conv2D", "config": {"name": "conv2d_1", "trainable": true, "batch_input_shape": [null, 97, 12, 1], "dtype": "float32", "filters": 32, "kernel_size": [2, 2], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_2", "trainable": true, "filters": 48, "kernel_size": [2, 2], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Conv2D", "config": {"name": "conv2d_3", "trainable": true, "filters": 120, "kernel_size": [2, 2], "strides": [1, 1], "padding": "valid", "data_format": "channels_last", "dilation_rate": [1, 1], "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "MaxPooling2D", "config": {"name": "max_pooling2d_1", "trainable": true, "pool_size": [2, 2], "padding": "valid", "strides": [2, 2], "data_format": "channels_last"}}, {"class_name": "Dropout", "config": {"name": "dropout_1", "trainable": true, "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Flatten", "config": {"name": "flatten_1", "trainable": true, "data_format": "channels_last"}}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "units": 128, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_2", "trainable": true, "rate": 0.25, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "units": 64, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}, {"class_name": "Dropout", "config": {"name": "dropout_3", "trainable": true, "rate": 0.4, "noise_shape": null, "seed": null}}, {"class_name": "Dense", "config": {"name": 
"dense_3", "trainable": true, "units": 10, "activation": "softmax", "use_bias": true, "kernel_initializer": {"class_name": "VarianceScaling", "config": {"scale": 1.0, "mode": "fan_avg", "distribution": "uniform", "seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4", "backend": "tensorflow"}
Binary file added my_model_4.pb
Binary file not shown.
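
The frozen graph produced by freeze_session and tf.train.write_graph can be reloaded for inference with the TensorFlow 1.x graph API. A minimal sketch (the input/output tensor names are placeholders; inspect the imported graph's operations to find the real ones for my_model_4.pb):

# Sketch: run inference from the frozen GraphDef my_model_4.pb with TF 1.x.
# The tensor names below are placeholders; print(graph.get_operations()) to find the actual ones.
import numpy as np
import tensorflow as tf

with tf.gfile.GFile("my_model_4.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    input_t = graph.get_tensor_by_name("conv2d_1_input:0")    # placeholder name
    output_t = graph.get_tensor_by_name("dense_3/Softmax:0")  # placeholder name
    dummy_batch = np.zeros((1, 97, 12, 1), dtype=np.float32)  # one 97x12x1 input, per the model JSON
    with tf.Session(graph=graph) as sess:
        probs = sess.run(output_t, feed_dict={input_t: dummy_batch})
        print(probs.argmax(axis=1))
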
