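"""Train NGC (neural generative coding) classifiers on MNIST or MNIST-1D.

Two trainers are provided: run_ngc_pdh (GNCN_PDH_Classify) and run_ngc_angc
(NGC_ANGC). Each settles the network on (one-hot target, input) pairs for K
inference steps per batch, applies the model's locally computed updates via
Adam, and checkpoints whenever validation cross-entropy improves.
"""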

import os
import pickle
import urllib.request  # `import urllib` alone does not expose urllib.request

import torch
import torch.nn.functional as F
import torchvision
from torch.utils.data import DataLoader, TensorDataset, random_split

from models import GNCN_PDH_Classify, NGC_ANGC
from utils import make_moving_collate_fn, set_seed


def preprocess_mnist(batch_size, device, N_per_class=None):
    transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])
    data_train = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transforms)
    moving_collate = make_moving_collate_fn(device)
    if N_per_class is not None:
        # Subsample a class-balanced subset: N_per_class examples per digit.
        selected_idxs = []
        for label in range(10):
            label_idxs = torch.where(data_train.targets == label)[0]
            label_selected_idxs = label_idxs[torch.randperm(len(label_idxs))[:N_per_class]]
            selected_idxs.extend(label_selected_idxs.tolist())
        data_train = torch.utils.data.Subset(data_train, selected_idxs)
    # Split into train and validation sets.
    if N_per_class is None:
        train_size, val_size = 50000, 10000
    else:
        val_size = int(0.15 * len(data_train))
        train_size = len(data_train) - val_size
    data_train, data_val = random_split(data_train, [train_size, val_size])
    loader_train = DataLoader(data_train, batch_size=batch_size, shuffle=True, collate_fn=moving_collate)
    loader_val = DataLoader(data_val, batch_size=batch_size, shuffle=False, collate_fn=moving_collate)
    return loader_train, loader_val
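
# Note: make_moving_collate_fn comes from the local utils module (not shown
# here). A minimal sketch of what it plausibly does -- an assumption, the
# real implementation may differ -- is to wrap the default collate and move
# each batch onto the target device:
#
#   def make_moving_collate_fn(device):
#       def moving_collate(batch):
#           inputs, targets = torch.utils.data.default_collate(batch)
#           return inputs.to(device), targets.to(device)
#       return moving_collate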


def download_mnist1d(data_path):
    url = 'https://github.com/greydanus/mnist1d/raw/master/mnist1d_data.pkl'
    with urllib.request.urlopen(url) as response, open(data_path, 'wb') as out_file:
        data = response.read()
        out_file.write(data)


def preprocess_mnist1d(batch_size, device):
    data_path = './data/mnist1d_data.pkl'
    if not os.path.exists(data_path):
        data_dir = os.path.dirname(data_path)
        os.makedirs(data_dir, exist_ok=True)
        download_mnist1d(data_path)
    with open(data_path, 'rb') as handle:
        data = pickle.load(handle)
    X_train = torch.tensor(data['x'], dtype=torch.float32)
    Y_train = torch.tensor(data['y'], dtype=torch.int64)
    # X_test = torch.tensor(data['x_test'], dtype=torch.float32)
    # Y_test = torch.tensor(data['y_test'], dtype=torch.int64)
    train_dataset = TensorDataset(X_train, Y_train)
    moving_collate = make_moving_collate_fn(device)
    # Hold out 10% of the training examples for validation.
    valid_frac = 0.1
    valid_size = int(len(train_dataset) * valid_frac)
    train_size = len(train_dataset) - valid_size
    data_train, data_val = random_split(train_dataset, [train_size, valid_size])
    loader_train = DataLoader(data_train, batch_size=batch_size, shuffle=True, collate_fn=moving_collate)
    loader_val = DataLoader(data_val, batch_size=batch_size, shuffle=False, collate_fn=moving_collate)
    return loader_train, loader_val
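
# Note: MNIST-1D examples are 40-dimensional signals rather than 28x28
# images, so the run_* functions below would need dim_inp = 40 (instead of
# 784) when switching to preprocess_mnist1d.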


def cross_entropy_loss(targets, predictions, eps=1e-7):
    # Summed (not averaged) cross-entropy; predictions are clamped away from
    # 0 and 1 to keep the log numerically stable.
    clamped_predictions = torch.clamp(predictions, min=eps, max=1.0 - eps)
    return -torch.sum(targets * torch.log(clamped_predictions))
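
# Example: with one-hot targets and softmax-style predictions this is the
# summed negative log-likelihood of the correct classes, e.g.
#   targets = F.one_hot(torch.tensor([2]), num_classes=3).float()
#   preds = torch.tensor([[0.1, 0.2, 0.7]])
#   cross_entropy_loss(targets, preds)  # -log(0.7) ~= 0.357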


def eval_model(model, loader, num_classes):
    num_samples = 0
    tot_ce_loss = 0.
    tot_correct = 0
    for (inputs, targets) in loader:
        inputs = inputs.view([-1, model.dims[-1]])
        preds = model.project(inputs)
        targets_oh = F.one_hot(targets, num_classes=num_classes)
        tot_ce_loss += cross_entropy_loss(targets_oh, preds)
        tot_correct += torch.sum(torch.argmax(preds, dim=1) == targets)
        num_samples += inputs.shape[0]
    avg_ce_loss = tot_ce_loss / (1.0 * num_samples)
    acc = tot_correct / (1.0 * num_samples)
    print(f"(Eval) Avg CE loss = {avg_ce_loss}, avg accuracy = {acc}")
    return avg_ce_loss, acc
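
# Usage note: eval_model is called before training and after every epoch,
# e.g. `val_ce_loss, val_acc = eval_model(model, loader_val, num_classes)`.
# It assumes model.project(inputs) gives class predictions via a single
# feedforward (projection) pass, i.e. no iterative settling is needed at
# evaluation time.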


def run_ngc_pdh(seed, trial_name='ngc'):
    set_seed(seed)
    num_epochs = 200
    num_classes = 10
    batch_size = 500
    lr = 0.001
    dim_inp = 784
    dim_hid = 360
    K = 60  # number of iterative-inference (settling) steps per batch
    # err_update_coeff = 0.95
    N_per_class = None
    checkpoint_dir = 'checkpoints'
    os.makedirs(checkpoint_dir, exist_ok=True)

    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device_name)
    loader_train, loader_val = preprocess_mnist(batch_size, device, N_per_class=N_per_class)
    # loader_train, loader_val = preprocess_mnist1d(batch_size, device)

    ngc_config = {
        'L': 3,
        'dims': [num_classes, dim_hid, dim_hid, dim_inp],
        'fns_phi': ['identity', 'relu', 'relu', 'identity'],
        'fns_g': ['softmax', 'identity', 'identity', 'identity'],
        'weight_stddev': 0.025,
        'beta': 0.1,
        'leak': 0.001,
        'use_skip': True,
        'use_lateral': False,
        'use_err_precision': True,
    }
    model = GNCN_PDH_Classify(ngc_config, device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, maximize=False)
    # optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, maximize=False)

    val_ce_loss, val_acc = eval_model(model, loader_val, num_classes)
    best_val_ce_loss = val_ce_loss
    for epoch in range(num_epochs):
        print(f"--- Epoch {epoch}")
        num_samples = 0
        for i, (inputs, targets) in enumerate(loader_train):
            inputs = inputs.view([-1, dim_inp])
            targets_oh = F.one_hot(targets, num_classes=num_classes)
            # Settle the network on (target, input), then apply the model's
            # locally computed updates.
            out_pred = model.infer(targets_oh, inputs, K=K)
            optimizer.zero_grad()
            model.calc_updates()
            num_samples += inputs.shape[0]
            optimizer.step()
            model.clip_weights()
        # print(f"(Train) Avg Total discrepancy = {totd / (1.0 * num_samples)}, Avg BCE loss = {bce_loss / (1.0 * num_samples)}")
        val_ce_loss, val_acc = eval_model(model, loader_val, num_classes)
        if val_ce_loss < best_val_ce_loss:
            checkpoint_filename = f'{checkpoint_dir}/{trial_name}-model.pt'
            torch.save(model.state_dict(), checkpoint_filename)
            print(f"Saved checkpoint to {checkpoint_filename} (CE loss {best_val_ce_loss} -> {val_ce_loss})")
            best_val_ce_loss = val_ce_loss


def run_ngc_angc(seed, trial_name='ngc'):
    set_seed(seed)
    num_epochs = 200
    num_classes = 10
    batch_size = 500
    lr = 0.001
    dim_inp = 784
    dim_hid = 360
    K = 60  # number of iterative-inference (settling) steps per batch
    # err_update_coeff = 0.95
    N_per_class = None
    checkpoint_dir = 'checkpoints'
    os.makedirs(checkpoint_dir, exist_ok=True)

    device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device_name)
    loader_train, loader_val = preprocess_mnist(batch_size, device, N_per_class=N_per_class)
    # loader_train, loader_val = preprocess_mnist1d(batch_size, device)

    ngc_config = {
        'L': 3,
        'dims': [num_classes, dim_hid, dim_hid, dim_inp],
        'fns_phi': ['identity', 'relu', 'relu', 'identity'],  # unused by the NGC_ANGC constructor call below
        'weight_stddev': 0.025,
        'beta': 0.1,
        'beta_e': 0.5,
        'leak': 0.001,
    }
    model = NGC_ANGC(
        ngc_config['L'],
        ngc_config['dims'],
        ngc_config['weight_stddev'],
        beta=ngc_config['beta'],
        beta_e=ngc_config['beta_e'],
        gamma=ngc_config['leak'],
        device=device,
    )
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, maximize=False)
    # optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, maximize=False)

    val_ce_loss, val_acc = eval_model(model, loader_val, num_classes)
    best_val_ce_loss = val_ce_loss
    for epoch in range(num_epochs):
        print(f"--- Epoch {epoch}")
        num_samples = 0
        for i, (inputs, targets) in enumerate(loader_train):
            inputs = inputs.view([-1, dim_inp])
            targets_oh = F.one_hot(targets, num_classes=num_classes)
            out_pred = model.infer(targets_oh, inputs, K=K)
            optimizer.zero_grad()
            model.calc_updates()
            num_samples += inputs.shape[0]
            optimizer.step()
            model.normalize_weights()
        # print(f"(Train) Avg Total discrepancy = {totd / (1.0 * num_samples)}, Avg BCE loss = {bce_loss / (1.0 * num_samples)}")
        val_ce_loss, val_acc = eval_model(model, loader_val, num_classes)
        if val_ce_loss < best_val_ce_loss:
            checkpoint_filename = f'{checkpoint_dir}/{trial_name}-model.pt'
            torch.save(model.state_dict(), checkpoint_filename)
            print(f"Saved checkpoint to {checkpoint_filename} (CE loss {best_val_ce_loss} -> {val_ce_loss})")
            best_val_ce_loss = val_ce_loss


if __name__ == '__main__':
    # run_ngc_pdh(314159, trial_name='ngc-pdh-classify')
    run_ngc_angc(314159, trial_name='ngc-angc')
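
# Note: the PDH variant can be run by swapping which call is commented out
# above; both trainers write their best checkpoint (by validation CE loss)
# to checkpoints/<trial_name>-model.pt.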