-
Notifications
You must be signed in to change notification settings - Fork 0
/
FeatureSelectionJob.py
204 lines (172 loc) · 8.73 KB
/
FeatureSelectionJob.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import time
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
import copy
import pandas as pd
import os
import csv
import sys
def job(full_path,do_mutual_info,do_multisurf,max_features_to_keep,filter_poor_features,top_results,export_scores,class_label,instance_label,cv_partitions,overwrite_cv):
    """Run phase 4 (feature selection) for one dataset.

    Collates the per-CV feature importance scores pickled by phase 3
    (mutual information and/or MultiSURF), optionally filters every CV
    train/test dataset down to the selected feature subset, records the
    phase runtime, and writes a job-completion marker file.

    All boolean-like parameters arrive as the strings 'True'/'False'
    because this script is launched via the command line (see __main__).
    """
    job_start_time = time.time()
    dataset_name = full_path.split('/')[-1]
    selected_feature_lists = {}
    meta_feature_ranks = {}
    algorithms = []
    filter_poor_features = filter_poor_features == 'True'
    # Mutual Information
    if do_mutual_info == 'True':
        algorithms.append('Mutual Information')
        selected_feature_lists,meta_feature_ranks = reportAveFS("Mutual Information","mutualinformation",cv_partitions,top_results,full_path,selected_feature_lists,meta_feature_ranks,export_scores)
    # MultiSURF
    if do_multisurf == 'True':
        algorithms.append('MultiSURF')
        selected_feature_lists,meta_feature_ranks = reportAveFS("MultiSURF","multisurf",cv_partitions,top_results,full_path,selected_feature_lists,meta_feature_ranks,export_scores)
    # Feature selection is only possible when at least one algorithm ran
    # (collapsed the original nested ifs into one condition).
    if len(algorithms) != 0 and filter_poor_features:
        # Identify top feature subset for each CV partition
        cv_selected_list = selectFeatures(algorithms,cv_partitions,selected_feature_lists,max_features_to_keep,meta_feature_ranks)
        # Generate new datasets containing only the selected feature subsets
        genFilteredDatasets(cv_selected_list,class_label,instance_label,cv_partitions,full_path+'/CVDatasets',dataset_name,overwrite_cv)
    # Save runtime ('with' closes the handle even if the write fails; the
    # original leaked the file object in that case)
    with open(full_path + '/runtime/runtime_featureselection.txt', 'w') as runtime_file:
        runtime_file.write(str(time.time() - job_start_time))
    # Print completion
    print(dataset_name + " phase 4 complete")
    experiment_path = '/'.join(full_path.split('/')[:-1])
    with open(experiment_path + '/jobsCompleted/job_featureselection_' + dataset_name + '.txt', 'w') as job_file:
        job_file.write('complete')
def reportAveFS(algorithm,algorithmlabel,cv_partitions,top_results,full_path,selected_feature_lists,meta_feature_ranks,export_scores):
    """Aggregate one algorithm's phase-3 feature scores across CV partitions.

    For each partition pickle (list where index 1 is a feature->score dict and
    index 2 is the score-sorted feature-name list): keeps the per-partition
    list of features with a strictly positive score, accumulates a score sum
    across partitions, and records the per-partition rankings. Optionally
    exports a barplot of the top 'top_results' average scores.

    Updates (in place) and returns (selected_feature_lists, meta_feature_ranks)
    keyed by the human-readable algorithm name.
    """
    scoreSum = None  # None-flag replaces the original's counter; also guards cv_partitions == 0
    cv_keep_list = []
    feature_name_ranks = []
    for i in range(cv_partitions):
        scoreInfo = full_path+"/"+algorithmlabel+"/pickledForPhase4/"+str(i)
        # 'with' guarantees the pickle handle is closed even if load raises
        with open(scoreInfo, 'rb') as file:
            rawData = pickle.load(file)
        scoreDict = rawData[1]
        feature_name_ranks.append(rawData[2])  # score-sorted feature names
        if scoreSum is None:
            scoreSum = copy.deepcopy(scoreDict)
        else:
            for each in scoreDict:
                scoreSum[each] += scoreDict[each]
        # Features with a positive score are 'kept' for this partition
        keep_list = [each for each in scoreDict if scoreDict[each] > 0]
        cv_keep_list.append(keep_list)
    selected_feature_lists[algorithm] = cv_keep_list
    meta_feature_ranks[algorithm] = feature_name_ranks
    # Generate barplot of average scores (skipped safely when no partitions:
    # the original raised NameError on the unbound scoreSum in that case)
    if export_scores == 'True' and scoreSum is not None:
        # Turn the sum of scores into an average
        for v in scoreSum:
            scoreSum[v] = scoreSum[v] / float(cv_partitions)
        # Sort averages (decreasing order) and plot the top 'n'
        ns = pd.DataFrame({'Names': list(scoreSum.keys()), 'Scores': list(scoreSum.values())})
        ns = ns.sort_values(by='Scores', ascending=False)
        ns = ns.head(top_results)
        # Visualize sorted feature scores
        ns['Scores'].plot(kind='barh', figsize=(6, 12))
        plt.ylabel('Features')
        plt.xlabel(str(algorithm) + ' Score')
        plt.yticks(np.arange(len(ns['Names'])), ns['Names'])
        plt.title('Sorted ' + str(algorithm) + ' Scores')
        plt.savefig((full_path+"/"+algorithmlabel+"/TopAverageScores.png"), bbox_inches="tight")
        plt.close('all')
    return selected_feature_lists,meta_feature_ranks
def selectFeatures(algorithms, cv_partitions, selectedFeatureLists, maxFeaturesToKeep, metaFeatureRanks):
    """Pick the feature subset to keep for each CV partition.

    Multiple algorithms: start from the union of each algorithm's 'kept'
    features; if the union exceeds maxFeaturesToKeep, rebuild the list by
    round-robining over every algorithm's best-ranked features until the cap
    is reached. Single algorithm: truncate to the top-ranked features.

    BUG FIX: the original inner loop broke out while the list was still
    *under* capacity (`< maxFeaturesToKeep`), which starved every algorithm
    after the first of its turn and — because appends were not capped —
    could overshoot maxFeaturesToKeep once the list filled mid-loop. The
    break now fires exactly when the cap is reached, so each algorithm
    contributes in turn and the result never exceeds the cap.
    """
    cv_Selected_List = []  # list of selected features for each cv
    numAlgorithms = len(algorithms)
    if numAlgorithms > 1:  # 'Interesting' features = union of the algorithms' results
        for i in range(cv_partitions):
            unionList = selectedFeatureLists[algorithms[0]][i]  # grab first algorithm's list
            # Determine union across remaining algorithms
            for j in range(1, numAlgorithms):
                unionList = list(set(unionList) | set(selectedFeatureLists[algorithms[j]][i]))
            if len(unionList) > maxFeaturesToKeep:  # Apply further filtering
                newFeatureList = []
                k = 0  # rank depth: take each algorithm's k-th best feature per round
                while len(newFeatureList) < maxFeaturesToKeep:
                    for each in metaFeatureRanks:
                        targetFeature = metaFeatureRanks[each][i][k]
                        if targetFeature not in newFeatureList:
                            newFeatureList.append(targetFeature)
                        if len(newFeatureList) == maxFeaturesToKeep:
                            break  # cap reached — stop mid-round
                    k += 1
                unionList = newFeatureList
            unionList.sort()  # ensure script random seed reproducibility
            cv_Selected_List.append(unionList)
    else:  # Only one algorithm applied
        for i in range(cv_partitions):
            featureList = selectedFeatureLists[algorithms[0]][i]  # first algorithm's list
            if len(featureList) > maxFeaturesToKeep:
                # Top-k of the ranked names — equivalent to the original's
                # append-while loop, without the index bookkeeping
                featureList = metaFeatureRanks[algorithms[0]][i][:maxFeaturesToKeep]
            cv_Selected_List.append(featureList)
    return cv_Selected_List
def genFilteredDatasets(cv_selected_list,class_label,instance_label,cv_partitions,path_to_csv,dataset_name,overwrite_cv):
    """Rewrite each CV train/test CSV keeping only the selected features.

    For each partition i, loads the existing CV files, projects them onto
    [class_label, (instance_label,) *cv_selected_list[i]], then either deletes
    the originals (overwrite_cv == 'True') or renames them to '_CVPre_' files
    before writing the filtered CSVs under the original names.

    Fixes vs. original: CSV outputs are opened with newline='' (without it the
    csv module emits doubled line endings / blank rows on Windows); the
    redundant file.close() inside 'with' is gone; dataframes are processed one
    partition at a time instead of being accumulated in throwaway lists.
    """
    for i in range(cv_partitions):
        train_path = path_to_csv+'/'+dataset_name+'_CV_' + str(i) + "_Train.csv"
        test_path = path_to_csv+'/'+dataset_name+'_CV_' + str(i) + "_Test.csv"
        # Load training and testing partitions
        trainSet = pd.read_csv(train_path, na_values='NA', sep=",")
        testSet = pd.read_csv(test_path, na_values='NA', sep=",")
        # Columns to retain: class label, optional instance label, selected features
        labelList = [class_label]
        if instance_label != 'None':
            labelList.append(instance_label)
        labelList = labelList + cv_selected_list[i]
        td_train = trainSet[labelList]
        td_test = testSet[labelList]
        if overwrite_cv == 'True':
            # Remove old CV files
            os.remove(train_path)
            os.remove(test_path)
        else:
            # Rename old CV files so the pre-filter data is preserved
            os.rename(train_path, path_to_csv+'/'+dataset_name+'_CVPre_' + str(i) + "_Train.csv")
            os.rename(test_path, path_to_csv+'/'+dataset_name+'_CVPre_' + str(i) + "_Test.csv")
        # Write new CV files (newline='' lets csv.writer manage line endings)
        for df, out_path in ((td_train, train_path), (td_test, test_path)):
            with open(out_path, mode='w', newline='') as file:
                writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(df.columns.values.tolist())
                for row in df.values:
                    writer.writerow(row)
if __name__ == '__main__':
    # Command-line entry point: positional argv plumbing for the cluster
    # job scheduler; integer arguments are cast, the rest stay strings.
    argv = sys.argv
    job(argv[1], argv[2], argv[3], int(argv[4]), argv[5], int(argv[6]),
        argv[7], argv[8], argv[9], int(argv[10]), argv[11])