import math
from typing import Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mixed_precision import maybe_half
from utils import flatten, random_locs_2d, Flatten
from costs import LossMultiNCE
def has_many_gpus():
return torch.cuda.device_count() >= 6
class Encoder(nn.Module):
def __init__(self, dummy_batch, num_channels=3, ndf=64, n_rkhs=512,
n_depth=3, encoder_size=32, use_bn=False, rkhs_conv_depth=0):
"""
Args:
dummy_batch:
num_channels:
ndf:
n_rkhs:
n_depth:
encoder_size:
use_bn: bool flag indicateing to use batch norm
rkhs_conv_depth: how many mlp layers shall be added to the discriminator in feature space
"""
super(Encoder, self).__init__()
self.ndf = ndf
self.n_rkhs = n_rkhs
self.use_bn = use_bn
self.dim2layer = None
# encoding block for local features
print('Using a {}x{} encoder'.format(encoder_size, encoder_size))
if encoder_size == 32:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResNxN(ndf, ndf, 1, 1, 0, use_bn),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 4, True, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 4, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 64:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 128:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 5, 2, 2, False, pad_mode='reflect'),
Conv3x3(ndf, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
else:
            raise RuntimeError("Could not build encoder. "
                               "Encoder size {} is not supported".format(encoder_size))
self._config_modules(dummy_batch, [1, 5, 7], n_rkhs, use_bn, rkhs_conv_depth)
    def init_weights(self, init_scale=1.):
        '''
        Run custom weight init for modules...
        '''
        # self.modules() traverses all submodules, including those in
        # self.layer_list, so one pass covers every custom-init layer
        for layer in self.modules():
            if isinstance(layer, (ConvResNxN, ConvResBlock)):
                layer.init_weights(init_scale)
            if isinstance(layer, FakeRKHSConvNet):
                layer.init_weights(init_scale)
def _config_modules(self, x, rkhs_layers, n_rkhs, use_bn, rkhs_conv_depth):
        '''
        Configure the modules for extracting fake rkhs embeddings for infomax.
        rkhs_conv_depth: int - how many MLP layers shall be added to the MLP head in the Phi space
        '''
enc_acts = self._forward_acts(x)
self.dim2layer = {}
for i, h_i in enumerate(enc_acts):
for d in rkhs_layers:
if h_i.size(2) == d:
self.dim2layer[d] = i
# get activations and feature sizes at different layers
self.ndf_1 = enc_acts[self.dim2layer[1]].size(1)
self.ndf_5 = enc_acts[self.dim2layer[5]].size(1)
self.ndf_7 = enc_acts[self.dim2layer[7]].size(1)
# configure modules for fake rkhs embeddings
self.rkhs_block_1 = NopNet()
self.rkhs_block_5 = FakeRKHSConvNet(self.ndf_5, n_rkhs, use_bn, rkhs_conv_depth=rkhs_conv_depth)
self.rkhs_block_7 = FakeRKHSConvNet(self.ndf_7, n_rkhs, use_bn, rkhs_conv_depth=rkhs_conv_depth)
def _forward_acts(self, x):
'''
Return activations from all layers.
'''
# run forward pass through all layers
layer_acts = [x]
for _, layer in enumerate(self.layer_list):
layer_in = layer_acts[-1]
layer_out = layer(layer_in)
layer_acts.append(layer_out)
# remove input from the returned list of activations
return_acts = layer_acts[1:]
return return_acts
def forward(self, x):
'''
Compute activations and Fake RKHS embeddings for the batch.
'''
        if has_many_gpus():
            # dummy chunks sent to cuda:0 are all-zero, so skip compute
            # for them and return matching dummy embeddings
            if x.abs().mean() < 1e-4:
r1 = torch.zeros((1, self.n_rkhs, 1, 1),
device=x.device, dtype=x.dtype).detach()
r5 = torch.zeros((1, self.n_rkhs, 5, 5),
device=x.device, dtype=x.dtype).detach()
r7 = torch.zeros((1, self.n_rkhs, 7, 7),
device=x.device, dtype=x.dtype).detach()
return r1, r5, r7
# compute activations in all layers for x
acts = self._forward_acts(x)
# gather rkhs embeddings from certain layers
r1 = self.rkhs_block_1(acts[self.dim2layer[1]])
r5 = self.rkhs_block_5(acts[self.dim2layer[5]])
r7 = self.rkhs_block_7(acts[self.dim2layer[7]])
return r1, r5, r7
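
# A minimal shape-check sketch (not part of the original module). It assumes
# the 32x32 configuration, illustrative sizes (ndf=16, n_rkhs=64), and fewer
# than 6 visible GPUs (so the dummy-input shortcut above is not taken); the
# 1x1 / 5x5 / 7x7 output maps follow from the layer strides above.
def _demo_encoder_shapes():
    dummy = torch.zeros((2, 3, 32, 32))
    enc = Encoder(dummy, num_channels=3, ndf=16, n_rkhs=64, n_depth=1,
                  encoder_size=32, use_bn=False)
    r1, r5, r7 = enc(dummy)
    assert r1.shape == (2, 64, 1, 1)
    assert r5.shape == (2, 64, 5, 5)
    assert r7.shape == (2, 64, 7, 7)
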
class Evaluator(nn.Module):
def __init__(self, n_classes, ftr_1=None, dim_1=None):
super(Evaluator, self).__init__()
if ftr_1 is None:
# rely on provided input feature dimensions
self.dim_1 = dim_1
else:
# infer input feature dimensions from provided features
self.dim_1 = ftr_1.size(1)
self.n_classes = n_classes
self.block_glb_mlp = \
MLPClassifier(self.dim_1, self.n_classes, n_hidden=1024, p=0.2)
self.block_glb_lin = \
MLPClassifier(self.dim_1, self.n_classes, n_hidden=None, p=0.0)
def forward(self, ftr_1, detach=True):
'''
Input:
ftr_1 : features at 1x1 layer
Output:
          lgt_glb_mlp: class logits from the MLP head on global features
          lgt_glb_lin: class logits from the linear head on global features
'''
# collect features to feed into classifiers
# - always detach() -- send no grad into encoder!
if detach:
h_top_cls = flatten(ftr_1).detach()
else:
h_top_cls = flatten(ftr_1)
# compute predictions
lgt_glb_mlp = self.block_glb_mlp(h_top_cls)
lgt_glb_lin = self.block_glb_lin(h_top_cls)
return lgt_glb_mlp, lgt_glb_lin
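
# A minimal usage sketch (not part of the original module), assuming
# utils.flatten reshapes to (batch, -1): both heads map 1x1 features to
# (batch, n_classes) logits.
def _demo_evaluator():
    ftr_1 = torch.randn(4, 128, 1, 1)
    evaluator = Evaluator(n_classes=10, ftr_1=ftr_1)
    lgt_glb_mlp, lgt_glb_lin = evaluator(ftr_1, detach=True)
    assert lgt_glb_mlp.shape == (4, 10)
    assert lgt_glb_lin.shape == (4, 10)
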
class Model(nn.Module):
def __init__(self, ndf, n_classes, n_rkhs, tclip=20.,
n_depth=3, use_bn=False, encoder_size=32,
loss_predictions=None, rkhs_conv_depth=0):
super(Model, self).__init__()
self.hyperparams = {
'ndf': ndf,
'n_classes': n_classes,
'n_rkhs': n_rkhs,
'tclip': tclip,
'n_depth': n_depth,
'encoder_size': encoder_size,
'use_bn': use_bn,
'rkhs_conv_depth': rkhs_conv_depth
}
# self.n_rkhs = n_rkhs
self.tasks = ('1t5', '1t7', '5t5', '5t7', '7t7')
dummy_batch = torch.zeros((2, 3, encoder_size, encoder_size))
# encoder that provides multiscale features
self.encoder = Encoder(dummy_batch, num_channels=3, ndf=ndf,
n_rkhs=n_rkhs, n_depth=n_depth,
encoder_size=encoder_size, use_bn=use_bn, rkhs_conv_depth=rkhs_conv_depth)
rkhs_1, rkhs_5, _ = self.encoder(dummy_batch)
# convert for multi-gpu use
self.encoder = nn.DataParallel(self.encoder)
# configure hacky multi-gpu module for infomax costs
self.g2l_loss = LossMultiNCE(tclip=tclip, loss_predictions=loss_predictions)
# configure modules for classification with self-supervised features
self.evaluator = Evaluator(n_classes, ftr_1=rkhs_1)
# gather lists of self-supervised and classifier modules
self.info_modules = [self.encoder.module, self.g2l_loss]
self.class_modules = [self.evaluator]
self.loss_predictions = loss_predictions
def init_weights(self, init_scale=1.):
self.encoder.module.init_weights(init_scale)
def encode(self, x, no_grad=True, use_eval=False):
'''
Encode the images in x, with or without grads detached.
'''
if use_eval:
self.eval()
x = maybe_half(x)
if no_grad:
with torch.no_grad():
rkhs_1, rkhs_5, rkhs_7 = self.encoder(x)
else:
rkhs_1, rkhs_5, rkhs_7 = self.encoder(x)
if use_eval:
self.train()
return maybe_half(rkhs_1), maybe_half(rkhs_5), maybe_half(rkhs_7)
def reset_evaluator(self, n_classes=None):
'''
Reset the evaluator module, e.g. to apply encoder on new data.
- evaluator is reset to have n_classes classes (if given)
'''
dim_1 = self.evaluator.dim_1
if n_classes is None:
n_classes = self.evaluator.n_classes
self.evaluator = Evaluator(n_classes, dim_1=dim_1)
self.class_modules = [self.evaluator]
return self.evaluator
def forward(self, x1, x2, class_only=False, modality=None, training_all=False):
'''
Input:
x1 : images from which to extract features -- x1 ~ A(x)
x2 : images from which to extract features -- x2 ~ A(x)
          class_only : if True, skip the infomax objectives and return
                       only classifier outputs and global features
          modality : which view to classify on: None or 'rgb' -> x1,
                     'd'/'depth' -> x2, 'random' -> either one
          training_all : if True, let classifier gradients flow back
                         into the encoder (no detach)
Output:
res_dict : various outputs depending on the task
'''
# dict for returning various values
res_dict = {}
if class_only:
no_grad = not training_all
# shortcut to encode one image and evaluate classifier
if modality is None:
rkhs_1, _, _ = self.encode(x1, no_grad=no_grad)
elif modality == 'rgb':
rkhs_1, _, _ = self.encode(x1, no_grad=no_grad)
elif modality == 'd' or modality == 'depth':
rkhs_1, _, _ = self.encode(x2, no_grad=no_grad)
elif modality == 'random':
x = x1 if np.random.rand(1) >= .5 else x2
rkhs_1, _, _ = self.encode(x, no_grad=no_grad)
else:
                raise ValueError('Unknown modality {}'.format(modality))
lgt_glb_mlp, lgt_glb_lin = self.evaluator(rkhs_1, detach=no_grad)
res_dict['class'] = [lgt_glb_mlp, lgt_glb_lin]
res_dict['rkhs_glb'] = flatten(rkhs_1)
return res_dict
# hack for redistributing workload in highly multi-gpu setting
# -- yeah, "highly-multi-gpu" is obviously subjective...
if has_many_gpus():
n_batch = x1.size(0)
n_gpus = torch.cuda.device_count()
assert (n_batch % (n_gpus - 1) == 0), 'n_batch: {}'.format(n_batch)
# expand input with dummy chunks so cuda:0 can skip compute
chunk_size = n_batch // (n_gpus - 1)
dummy_chunk = torch.zeros_like(x1[:chunk_size])
x1 = torch.cat([dummy_chunk, x1], dim=0)
x2 = torch.cat([dummy_chunk, x2], dim=0)
# run augmented image pairs through the encoder
r1_x1, r5_x1, r7_x1 = self.encoder(x1)
r1_x2, r5_x2, r7_x2 = self.encoder(x2)
# hack for redistributing workload in highly-multi-gpu setting
if has_many_gpus():
# strip off dummy vals returned by cuda:0
r1_x1, r5_x1, r7_x1 = r1_x1[1:], r5_x1[1:], r7_x1[1:]
r1_x2, r5_x2, r7_x2 = r1_x2[1:], r5_x2[1:], r7_x2[1:]
# compute NCE infomax objective at multiple scales
loss_1t5, loss_1t7, loss_5t5, lgt_reg = \
self.g2l_loss(r1_x1, r5_x1, r7_x1, r1_x2, r5_x2, r7_x2)
        res_dict['g2l_1t5'] = loss_1t5
        res_dict['g2l_1t7'] = loss_1t7
        res_dict['g2l_5t5'] = loss_5t5
res_dict['lgt_reg'] = lgt_reg
# grab global features for use elsewhere
res_dict['rkhs_glb'] = flatten(r1_x1)
# compute classifier logits for online eval during infomax training
# - we do this for both images in each augmented pair...
if training_all:
if modality == 'rgb':
lgt_glb_mlp, lgt_glb_lin = self.evaluator(
ftr_1=r1_x1, detach=(not training_all))
elif modality == 'depth':
lgt_glb_mlp, lgt_glb_lin = self.evaluator(
ftr_1=r1_x2, detach=(not training_all))
else:
lgt_glb_mlp, lgt_glb_lin = self.evaluator(
ftr_1=torch.cat([r1_x1, r1_x2]), detach=(not training_all))
else:
lgt_glb_mlp, lgt_glb_lin = self.evaluator(ftr_1=torch.cat([r1_x1, r1_x2]), detach=(not training_all))
res_dict['class'] = [lgt_glb_mlp, lgt_glb_lin]
return res_dict
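
# A minimal usage sketch (not part of the original module) for the
# classifier-only path of Model.forward. It assumes a CPU-only or single-GPU
# run, that LossMultiNCE constructs without GPUs, and that maybe_half is a
# no-op outside mixed-precision training; treat it as illustrative, not a
# definitive test.
def _demo_model_class_only():
    model = Model(ndf=16, n_classes=10, n_rkhs=64, n_depth=1, encoder_size=32)
    x = torch.randn(4, 3, 32, 32)
    res_dict = model(x1=x, x2=x, class_only=True)
    lgt_glb_mlp, lgt_glb_lin = res_dict['class']
    assert lgt_glb_mlp.shape == (4, 10)
    assert lgt_glb_lin.shape == (4, 10)
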
##############################
# Layers for use in model... #
##############################
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super(MaybeBatchNorm2d, self).__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class NopNet(nn.Module):
def __init__(self, norm_dim=None):
super(NopNet, self).__init__()
self.norm_dim = norm_dim
def forward(self, x):
if self.norm_dim is not None:
x_norms = torch.sum(x**2., dim=self.norm_dim, keepdim=True)
x_norms = torch.sqrt(x_norms + 1e-6)
x = x / x_norms
return x
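
# A minimal sketch (not part of the original module): with norm_dim set,
# NopNet L2-normalizes along that dimension (up to the 1e-6 stabilizer).
def _demo_nopnet():
    x = torch.randn(2, 8)
    normed = NopNet(norm_dim=1)(x)
    assert torch.allclose(normed.norm(dim=1), torch.ones(2), atol=1e-3)
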
class Conv3x3(nn.Module):
def __init__(self, n_in, n_out, n_kern, n_stride, n_pad,
use_bn=True, pad_mode='constant'):
super(Conv3x3, self).__init__()
assert(pad_mode in ['constant', 'reflect'])
self.n_pad = (n_pad, n_pad, n_pad, n_pad)
self.pad_mode = pad_mode
self.conv = nn.Conv2d(n_in, n_out, n_kern, n_stride, 0,
bias=(not use_bn))
self.relu = nn.ReLU(inplace=True)
self.bn = MaybeBatchNorm2d(n_out, True, use_bn) if use_bn else None
def forward(self, x):
if self.n_pad[0] > 0:
# pad the input if required
x = F.pad(x, self.n_pad, mode=self.pad_mode)
# conv is always applied
x = self.conv(x)
# apply batchnorm if required
if self.bn is not None:
x = self.bn(x)
# relu is always applied
out = self.relu(x)
return out
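
# A minimal sketch (not part of the original module): padding happens before
# the conv, so output size follows (H + 2*pad - kernel) // stride + 1.
def _demo_conv3x3():
    conv = Conv3x3(3, 8, n_kern=5, n_stride=2, n_pad=2,
                   use_bn=False, pad_mode='reflect')
    y = conv(torch.randn(2, 3, 32, 32))
    assert y.shape == (2, 8, 16, 16)  # (32 + 4 - 5) // 2 + 1 = 16
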
class MLPClassifier(nn.Module):
def __init__(self, n_input, n_classes, n_hidden=512, p=0.1):
super(MLPClassifier, self).__init__()
self.n_input = n_input
self.n_classes = n_classes
self.n_hidden = n_hidden
if n_hidden is None:
# use linear classifier
self.block_forward = nn.Sequential(
Flatten(),
nn.Dropout(p=p),
nn.Linear(n_input, n_classes, bias=True)
)
else:
# use simple MLP classifier
self.block_forward = nn.Sequential(
Flatten(),
nn.Dropout(p=p),
nn.Linear(n_input, n_hidden, bias=False),
nn.BatchNorm1d(n_hidden),
nn.ReLU(inplace=True),
nn.Dropout(p=p),
nn.Linear(n_hidden, n_classes, bias=True)
)
def forward(self, x):
logits = self.block_forward(x)
return logits
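
# A minimal sketch (not part of the original module), assuming utils.Flatten
# reshapes to (batch, -1): n_hidden=None gives a dropout + linear probe,
# any int gives the one-hidden-layer MLP head.
def _demo_mlp_classifier():
    x = torch.randn(8, 256)
    linear_probe = MLPClassifier(256, 10, n_hidden=None, p=0.0)
    mlp_head = MLPClassifier(256, 10, n_hidden=512, p=0.1)
    assert linear_probe(x).shape == (8, 10)
    assert mlp_head(x).shape == (8, 10)
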
class FakeRKHSConvNet(nn.Module):
def __init__(self, n_input, n_output, use_bn=False, rkhs_conv_depth=0):
super(FakeRKHSConvNet, self).__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.relu1 = nn.ReLU(inplace=True)
print(f'Configuring FakeRKHSConvNet with {rkhs_conv_depth} hidden blocks')
parts = []
for i in range(rkhs_conv_depth):
parts.append(nn.Conv2d(n_output, n_output, kernel_size=1, stride=1, padding=0, bias=False))
parts.append(nn.ReLU(inplace=True))
parts.append(MaybeBatchNorm2d(n_output, True, use_bn))
if rkhs_conv_depth == 0:
self.hidden_part = None
else:
self.hidden_part = nn.Sequential(*parts)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
        # BN is optional for the hidden layer and always applied to the output layer
self.bn_hid = MaybeBatchNorm2d(n_output, True, use_bn)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
stride=1, padding=0, bias=True)
# initialize shortcut to be like identity (if possible)
if n_output >= n_input:
eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=np.uint8)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask).bool(), 1.)
return
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
def init_weights_sequential(m):
if type(m) == nn.Conv2d:
nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
m.weight.data.mul_(init_scale)
if self.hidden_part is not None:
self.hidden_part.apply(init_weights_sequential)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
return
def forward(self, x):
"""
Args:
x: tensr type
Returns:
"""
# print(self.conv1.weight.type(), x.type())
h_relu_conv1 = self.relu1(self.bn_hid(self.conv1(x)))
if self.hidden_part is not None:
h_relu_conv1 = self.hidden_part(h_relu_conv1)
h_res = self.conv2(h_relu_conv1)
h = self.bn_out(h_res + self.shortcut(x))
return h
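
# A minimal sketch (not part of the original module): init_weights() zero-fills
# conv2, so the residual branch starts at zero and the module initially acts
# like its (near-identity) shortcut followed by bn_out.
def _demo_fake_rkhs_zero_init():
    net = FakeRKHSConvNet(n_input=8, n_output=8, use_bn=False)
    net.init_weights()
    x = torch.randn(2, 8, 5, 5)
    h_res = net.conv2(net.relu1(net.bn_hid(net.conv1(x))))
    assert torch.allclose(h_res, torch.zeros_like(h_res))
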
class ConvResNxN(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, use_bn=False):
super(ConvResNxN, self).__init__()
assert (n_out >= n_in)
self.n_in = n_in
self.n_out = n_out
self.width = width
self.stride = stride
self.pad = pad
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(n_in, n_out, width, stride, pad, bias=False)
self.conv2 = nn.Conv2d(n_out, n_out, 1, 1, 0, bias=False)
        # conv3 would handle n_out < n_in, but the assert above rules that
        # out, so the corresponding branch in forward() is never taken
        self.conv3 = None
self.bn1 = MaybeBatchNorm2d(n_out, True, use_bn)
return
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
return
def forward(self, x):
h1 = self.bn1(self.conv1(x))
h2 = self.conv2(self.relu2(h1))
if (self.n_out < self.n_in):
h3 = self.conv3(x)
elif (self.n_in == self.n_out):
h3 = F.avg_pool2d(x, self.width, self.stride, self.pad)
else:
h3_pool = F.avg_pool2d(x, self.width, self.stride, self.pad)
h3 = F.pad(h3_pool, (0, 0, 0, 0, 0, self.n_out - self.n_in))
h23 = h2 + h3
return h23
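
# A minimal sketch (not part of the original module) of the channel-growing
# shortcut above: F.pad reads its tuple from the last dimension backwards,
# so (0, 0, 0, 0, 0, k) appends k zero channels to an NCHW tensor.
def _demo_channel_pad():
    t = torch.randn(2, 3, 4, 4)
    padded = F.pad(t, (0, 0, 0, 0, 0, 5))  # grow 3 -> 8 channels
    assert padded.shape == (2, 8, 4, 4)
    assert torch.all(padded[:, 3:] == 0)
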
class ConvResBlock(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, depth, use_bn):
super(ConvResBlock, self).__init__()
layer_list = [ConvResNxN(n_in, n_out, width, stride, pad, use_bn)]
for i in range(depth - 1):
layer_list.append(ConvResNxN(n_out, n_out, 1, 1, 0, use_bn))
self.layer_list = nn.Sequential(*layer_list)
return
def init_weights(self, init_scale=1.):
'''
Do a fixup-style init for each ConvResNxN in this block.
'''
for m in self.layer_list:
m.init_weights(init_scale)
return
def forward(self, x):
# run forward pass through the list of ConvResNxN layers
x_out = self.layer_list(x)
return x_out
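

if __name__ == '__main__':
    # Small CPU smoke test (not part of the original module) covering the
    # self-contained sketches above; _demo_model_class_only is omitted since
    # it also exercises the external LossMultiNCE module.
    _demo_encoder_shapes()
    _demo_evaluator()
    _demo_nopnet()
    _demo_conv3x3()
    _demo_mlp_classifier()
    _demo_fake_rkhs_zero_init()
    _demo_channel_pad()
    print('all demo checks passed')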