# vgg_gpu0.py (forked from titu1994/LSTM-FCN)
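# Trains and evaluates one of the VGG-style fusion models from this repository
# on GPU 0, then writes the resulting accuracies under output/vgg/.
#
# Usage (argument meanings inferred from the command-line parsing below; valid
# values depend on utils.constants and the data available on disk):
#
#   python vgg_gpu0.py <dataset> <method> <proto_num>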
import os

# Restrict the process to GPU 0. This must be set before Keras/TensorFlow is
# imported (the utils.* imports below pull it in), otherwise the mask is ignored.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import sys
import math

import numpy as np

from utils.constants import max_seq_len, nb_classes
from utils.keras_utils import train_model, evaluate_model, set_trainable, visualize_context_vector, visualize_cam
from utils.model_utils import lstm_fcn_model, alstm_fcn_model
from utils.model_utils import cnn_raw_model, cnn_dtwfeatures_model, cnn_earlyfusion_model, cnn_midfusion_model, cnn_latefusion_model
from utils.model_utils import vgg_raw_model, vgg_dtwfeatures_model, vgg_earlyfusion_model, vgg_midfusion_model, vgg_latefusion_model
if __name__ == "__main__":
    # Command-line arguments: dataset name, method identifier, and number of prototypes.
    dataset = sys.argv[1]
    method = sys.argv[2]
    proto_num = int(sys.argv[3])

    max_seq_length = max_seq_len(dataset)
    nb_class = nb_classes(dataset)

    # Derive the number of pooling layers from the sequence length: round(log2(length)) - 3.
    nb_cnn = int(round(math.log(max_seq_length, 2)) - 3)

    # Pick exactly one model builder; this script uses the VGG mid-fusion variant.
    # model = lstm_fcn_model(proto_num, max_seq_length, nb_class)
    # model = alstm_fcn_model(proto_num, max_seq_length, nb_class)
    # model = cnn_raw_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = cnn_dtwfeatures_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = cnn_earlyfusion_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = cnn_midfusion_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = cnn_latefusion_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = vgg_raw_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = vgg_dtwfeatures_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = vgg_earlyfusion_model(nb_cnn, proto_num, max_seq_length, nb_class)
    model = vgg_midfusion_model(nb_cnn, proto_num, max_seq_length, nb_class)
    # model = vgg_latefusion_model(nb_cnn, proto_num, max_seq_length, nb_class)

    print("Number of Pooling Layers: %s" % str(nb_cnn))

    train_model(model, dataset, method, proto_num, dataset_prefix=dataset,
                nb_iterations=50000, batch_size=50, opt='Nadam', learning_rate=0.0001,
                early_stop=False, balance_classes=False, run_ver='vgg_')

    # Evaluate the best-loss and best-validation-accuracy checkpoints and record
    # each accuracy in its own file under output/vgg/.
    if not os.path.isdir("output/vgg"):
        os.makedirs("output/vgg")

    acc = evaluate_model(model, dataset, method, proto_num, dataset_prefix=dataset,
                         batch_size=50, checkpoint_prefix="vgg_loss")
    np.savetxt("output/vgg/vgg-%s-%s-%s-loss-%s" % (dataset, method, str(proto_num), str(acc)), [acc])

    acc = evaluate_model(model, dataset, method, proto_num, dataset_prefix=dataset,
                         batch_size=50, checkpoint_prefix="vgg_val_acc")
    np.savetxt("output/vgg/vgg-%s-%s-%s-vacc-%s" % (dataset, method, str(proto_num), str(acc)), [acc])