# tensorflow_casestudy.py
# we must import the libraries again, since they haven't been imported in this file
import numpy as np
import tensorflow as tf
# let's create a temporary variable npz, where we will store each of the three Audiobooks datasets
npz = np.load('Audiobooks_data_train.npz')
# we extract the inputs using the keyword under which we saved them
# to ensure that they are all floats, let's also take care of that
train_inputs = npz['inputs'].astype(np.float64)
# targets must be integers because sparse_categorical_crossentropy expects integer class indices (it one-hot encodes them internally)
train_targets = npz['targets'].astype(np.int64)
# we load the validation data in the temporary variable
npz = np.load('Audiobooks_data_validation.npz')
# we can load the inputs and the targets in the same line
validation_inputs, validation_targets = npz['inputs'].astype(np.float64), npz['targets'].astype(np.int64)
# we load the test data in the temporary variable
npz = np.load('Audiobooks_data_test.npz')
# we create 2 variables that will contain the test inputs and the test targets
test_inputs, test_targets = npz['inputs'].astype(np.float64), npz['targets'].astype(np.int64)
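# For reference, a minimal sketch (an assumption, not part of this script) of how
# the .npz files above could have been produced, using the same keyword names;
# 'scaled_inputs' and 'shuffled_targets' are hypothetical preprocessing outputs:
# np.savez('Audiobooks_data_train.npz', inputs=scaled_inputs, targets=shuffled_targets)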
# Set the input and output sizes
input_size = 10   # one value per predictor in the dataset
output_size = 2   # two classes, so the targets are 0 or 1
# use the same hidden layer size for both hidden layers (not a necessity)
hidden_layer_size = 50
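# Side note (an assumption, not part of the original design): with only two
# classes, an equivalent formulation would be a single sigmoid output trained
# with binary_crossentropy:
# tf.keras.layers.Dense(1, activation='sigmoid')  # instead of the 2-unit softmax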
# define how the model will look
model = tf.keras.Sequential([
    # tf.keras.layers.Dense is basically implementing: output = activation(dot(input, weight) + bias)
    # it takes several arguments, but the most important ones for us are the hidden_layer_size and the activation function
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'), # 1st hidden layer
    tf.keras.layers.Dense(hidden_layer_size, activation='relu'), # 2nd hidden layer
    # the final layer is no different, we just make sure to activate it with softmax
    tf.keras.layers.Dense(output_size, activation='softmax') # output layer
])
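# A minimal alternative sketch (an assumption, not in the original): the same
# architecture with an explicit Input layer, which fixes the input shape up
# front and lets model.summary() run before any data is seen:
# model = tf.keras.Sequential([
#     tf.keras.Input(shape=(input_size,)),
#     tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
#     tf.keras.layers.Dense(hidden_layer_size, activation='relu'),
#     tf.keras.layers.Dense(output_size, activation='softmax')
# ])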
### Choose the optimizer and the loss function
# we define the optimizer we'd like to use,
# the loss function,
# and the metrics we are interested in obtaining at each iteration
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
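# A minimal sketch (an assumption, shown for contrast only): the dense equivalent
# of the sparse loss above would one-hot encode the integer targets first:
# one_hot_targets = tf.keras.utils.to_categorical(train_targets, num_classes=output_size)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])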
### Training
# That's where we train the model we have built.
# set the batch size
batch_size = 100
# set a maximum number of training epochs
max_epochs = 100
# set an early stopping mechanism
# let's set patience=2, to be a bit tolerant against random validation loss increases
early_stopping = tf.keras.callbacks.EarlyStopping(patience=2)
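# Optional variant (an assumption, not part of the original case study): roll back
# to the weights with the best validation loss once training stops:
# early_stopping = tf.keras.callbacks.EarlyStopping(patience=2, restore_best_weights=True)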
# fit the model
# note that this time the train, validation and test data are plain NumPy arrays
# rather than iterable Dataset objects, so we pass the batch size explicitly
model.fit(train_inputs, # train inputs
          train_targets, # train targets
          batch_size=batch_size, # batch size
          epochs=max_epochs, # epochs that we will train for (assuming early stopping doesn't kick in)
          # callbacks are utilities executed at set points during training;
          # here EarlyStopping checks after each epoch whether val_loss has stopped improving
          callbacks=[early_stopping], # early stopping
          validation_data=(validation_inputs, validation_targets), # validation data
          verbose=2 # making sure we get enough information about the training process
          )
test_loss, test_accuracy = model.evaluate(test_inputs, test_targets)
print('\nTest loss: {0:.2f}. Test accuracy: {1:.2f}%'.format(test_loss, test_accuracy*100.))
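# A minimal follow-up sketch (an assumption, beyond the original script): turning
# the model's softmax outputs into hard class predictions for the test set.
predicted_probabilities = model.predict(test_inputs)            # shape: (num_samples, output_size)
predicted_classes = np.argmax(predicted_probabilities, axis=1)  # most likely class per sample
print('First 10 predicted classes:', predicted_classes[:10])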