methods.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    if alpha > 0:
        # Sample the mixing coefficient from a symmetric Beta distribution.
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1

    batch_size = x.size(0)
    # A random permutation pairs each sample with another one from the batch.
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)

    # Convex combination of each sample with its randomly chosen partner.
    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam


def mixup_criterion(criterion, pred, y_a, y_b, lam):
    '''Combines the losses on both target sets with the same mixing coefficient.'''
    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
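

# Usage sketch (illustrative, not part of the original file): one optimization
# step that combines mixup_data and mixup_criterion. The model, optimizer, and
# batch names below are hypothetical placeholders.
def _mixup_step_example(model, optimizer, x, y, alpha=1.0):
    criterion = nn.CrossEntropyLoss()
    mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=alpha,
                                        use_cuda=torch.cuda.is_available())
    optimizer.zero_grad()
    pred = model(mixed_x)
    # The loss is the lambda-weighted sum of the losses on both target sets.
    loss = mixup_criterion(criterion, pred, y_a, y_b, lam)
    loss.backward()
    optimizer.step()
    return loss.item()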


class LabelAwareSmoothing(nn.Module):
    '''Label smoothing with per-class factors interpolated between smooth_head
    (most frequent class) and smooth_tail (least frequent class).'''

    def __init__(self, cls_num_list, smooth_head, smooth_tail, shape='concave', power=None):
        super(LabelAwareSmoothing, self).__init__()

        n_1 = max(cls_num_list)  # sample count of the largest (head) class
        n_K = min(cls_num_list)  # sample count of the smallest (tail) class

        if shape == 'concave':
            self.smooth = smooth_tail + (smooth_head - smooth_tail) * np.sin((np.array(cls_num_list) - n_K) * np.pi / (2 * (n_1 - n_K)))
        elif shape == 'linear':
            self.smooth = smooth_tail + (smooth_head - smooth_tail) * (np.array(cls_num_list) - n_K) / (n_1 - n_K)
        elif shape == 'convex':
            self.smooth = smooth_head + (smooth_head - smooth_tail) * np.sin(1.5 * np.pi + (np.array(cls_num_list) - n_K) * np.pi / (2 * (n_1 - n_K)))
        elif shape == 'exp' and power is not None:
            self.smooth = smooth_tail + (smooth_head - smooth_tail) * np.power((np.array(cls_num_list) - n_K) / (n_1 - n_K), power)

        self.smooth = torch.from_numpy(self.smooth).float()
        if torch.cuda.is_available():
            self.smooth = self.smooth.cuda()

    def forward(self, x, target):
        # Look up the smoothing factor of each sample's ground-truth class.
        smoothing = self.smooth[target]
        confidence = 1. - smoothing
        logprobs = F.log_softmax(x, dim=-1)
        # Standard NLL term on the true class ...
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        # ... plus a uniform term over all classes, weighted per sample.
        smooth_loss = -logprobs.mean(dim=-1)
        loss = confidence * nll_loss + smoothing * smooth_loss
        return loss.mean()
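

# Usage sketch (illustrative): label-aware smoothing on a small long-tailed
# setup. The class counts and smoothing endpoints below are assumed values,
# not taken from any particular experiment.
def _label_aware_smoothing_example():
    criterion = LabelAwareSmoothing(cls_num_list=[100, 50, 10],
                                    smooth_head=0.4, smooth_tail=0.1)
    # Keep inputs on the same device as the per-class smoothing table.
    device = criterion.smooth.device
    logits = torch.randn(8, 3, device=device)
    targets = torch.randint(0, 3, (8,), device=device)
    return criterion(logits, targets)  # scalar loss, averaged over the batch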


class LearnableWeightScaling(nn.Module):
    '''Rescales classifier logits with a learnable per-class weight, initialized to 1.'''

    def __init__(self, num_classes):
        super(LearnableWeightScaling, self).__init__()
        self.learned_norm = nn.Parameter(torch.ones(1, num_classes))

    def forward(self, x):
        return self.learned_norm * x
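

# Usage sketch (illustrative): LearnableWeightScaling multiplies each class
# logit by its learned scale, broadcast over the batch dimension. The shapes
# below are arbitrary example values.
def _lws_example(num_classes=10, batch_size=4):
    lws = LearnableWeightScaling(num_classes)
    logits = torch.randn(batch_size, num_classes)
    scaled = lws(logits)  # same shape; (1, num_classes) scale is broadcast
    return scaled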