Evaluation_ROC_AUC_Acc_Loss_Graphs.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 11:14:40 2020
@author: Karthikeyan S
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score

# ROC curve
y_pred = model.predict(X_test)
# Keep only the positive-class probabilities
y_pred = y_pred[:, 1]
# No-skill baseline: a constant score of 0 for every sample
ns_probs = np.zeros(len(y_test))
ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
fpr, tpr, _ = roc_curve(y_test, y_pred)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
plt.plot(fpr, tpr, marker='.', label='Built model')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.savefig('ROC.png')
plt.show()
# AUC score
lr_auc = roc_auc_score(y_test, y_pred)
print('AUC Score: %.3f' % lr_auc)
# Training accuracy vs validation accuracy
training_accuracy = history.history['accuracy']
validation_accuracy = history.history['val_accuracy']
plt.plot(training_accuracy)
plt.plot(validation_accuracy)
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.savefig('AccuracyVS.png')
plt.show()
# Training loss vs validation loss
training_loss = history.history['loss']
validation_loss = history.history['val_loss']
plt.plot(training_loss)
plt.plot(validation_loss)
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.savefig('LossVS.png')
plt.show()
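
The script above assumes a trained Keras binary classifier `model`, its fit result `history`, and hold-out arrays `X_test`/`y_test` already exist in scope, with the model emitting two softmax columns (hence `y_pred[:, 1]`). A minimal sketch of that missing context might look like the following; the architecture, the training-data names `X_train`/`y_train`, and the epoch count are illustrative assumptions, not settings taken from this repository.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Hypothetical two-class softmax model; layer sizes are placeholders
model = Sequential([
    Dense(16, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(2, activation='softmax'),  # two output columns, matching y_pred[:, 1]
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])  # populates 'accuracy'/'val_accuracy' in history
history = model.fit(X_train, y_train,
                    validation_data=(X_test, y_test),
                    epochs=20)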