-
Notifications
You must be signed in to change notification settings - Fork 0
/
models.py
138 lines (112 loc) · 5.28 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
import math
import torch
from torch import nn
from torch.nn.utils import weight_norm
class RNN(nn.Module):
    """Unidirectional LSTM followed by a per-timestep linear head with sigmoid output."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layer):
        super().__init__()
        self.lstm_net = nn.LSTM(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layer,
            bidirectional=False,
            batch_first=True,
        )
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """x: (N, L, input_dim) -> per-step scores in (0, 1) of shape (N, L, output_dim)."""
        hidden_states, _ = self.lstm_net(x)
        # For a single sequence-level label, keep only the final step instead:
        # hidden_states = hidden_states[:, -1, :]
        return torch.sigmoid(self.fc(hidden_states))
class Chomp1d(nn.Module):
    """Trim the last `chomp_size` time steps from a (N, C, L) tensor.

    Used after a padded causal convolution to discard the extra right-hand
    padding so the output length matches the input length.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        # BUG FIX: with chomp_size == 0 (e.g. kernel_size=1 => padding=0 in the
        # TCN), x[:, :, :-0] slices to an EMPTY tensor. A zero chomp must be a
        # no-op.
        if self.chomp_size == 0:
            return x
        return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
    """One residual block of a TCN: two weight-normalized causal dilated
    convolutions (each followed by chomp, ReLU, dropout) plus a skip connection.
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        # First causal stage: conv -> chomp (trim right padding) -> ReLU -> dropout.
        self.conv1 = weight_norm(
            nn.Conv1d(n_inputs, n_outputs, kernel_size,
                      stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        # Second stage, same shape-wise, mapping n_outputs -> n_outputs.
        self.conv2 = weight_norm(
            nn.Conv1d(n_outputs, n_outputs, kernel_size,
                      stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(
            self.conv1, self.chomp1, self.relu1, self.dropout1,
            self.conv2, self.chomp2, self.relu2, self.dropout2,
        )

        # 1x1 conv aligns channel counts on the residual path only when needed.
        self.downsample = None if n_inputs == n_outputs else nn.Conv1d(n_inputs, n_outputs, 1)
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Draw all conv weights from N(0, 0.01^2)."""
        # NOTE(review): the convs are weight-normalized, so writing to
        # .weight.data here follows the reference TCN implementation.
        for conv in (self.conv1, self.conv2, self.downsample):
            if conv is not None:
                conv.weight.data.normal_(0, 0.01)

    def forward(self, x):
        """x: (N, n_inputs, L) -> (N, n_outputs, L)."""
        residual = x if self.downsample is None else self.downsample(x)
        return self.relu(self.net(x) + residual)
class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (1, 2, 4, ...).

    `num_channels` lists the output channels of each level; its length sets
    the network depth.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        prev_channels = num_inputs
        for level, channels in enumerate(num_channels):
            dilation = 2 ** level
            # Padding keeps the output length equal to the input length once
            # the block's Chomp1d trims the right side.
            blocks.append(
                TemporalBlock(prev_channels, channels, kernel_size, stride=1,
                              dilation=dilation,
                              padding=(kernel_size - 1) * dilation,
                              dropout=dropout))
            prev_channels = channels
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        """x: (N, num_inputs, L) -> (N, num_channels[-1], L)."""
        return self.network(x)
class TCN(nn.Module):
    """Temporal convolutional network with a per-timestep linear + sigmoid head."""

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.tcn = TemporalConvNet(input_size, num_channels,
                                   kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)

    def forward(self, inputs):
        """inputs: (N, L, C_in) -> sigmoid scores of shape (N, L, output_size)."""
        features = self.tcn(inputs.transpose(-1, 1))     # TCN expects (N, C, L)
        scores = self.linear(features.transpose(-1, 1))  # Linear expects (N, L, C)
        # For a single sequence-level label one could instead take the last
        # step: self.linear(features[:, :, -1])
        return torch.sigmoid(scores)
class TimeSeriesTransformer(nn.Module):
    """Transformer encoder over a time series with a per-step linear + sigmoid head.

    Args:
        input_dim: feature dimension E of the input (also the model dim).
        output_dim: per-step output dimension.
        num_head: attention heads (must divide input_dim).
        num_layers: number of encoder layers.
        pos_encoding: if True, add sinusoidal positional encodings to the input.
        dropout: dropout rate passed to the encoder layers.
    """

    def __init__(self, input_dim, output_dim, num_head=8, num_layers=6, pos_encoding=False, dropout=0.0):
        super().__init__()
        self.pos_encoding = pos_encoding
        if self.pos_encoding:
            self.pos_encoder = PositionalEncoder(d_model=input_dim)
        # BUG FIX: `dropout` was accepted but never forwarded, so the encoder
        # layers silently used nn.TransformerEncoderLayer's 0.1 default.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=input_dim, nhead=num_head, dropout=dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, src, mask=None):
        """src: (N, S, E) -> per-step scores in (0, 1) of shape (N, S, output_dim)."""
        # BUG FIX: the positionally-encoded tensor was computed and then
        # discarded — the encoder was always fed the raw `src`.
        x = self.pos_encoder(src) if self.pos_encoding else src
        x = self.transformer_encoder(x, mask=mask)
        x = self.fc(x)
        return torch.sigmoid(x)
class PositionalEncoder(torch.nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    PE(pos, 2k)   = sin(pos / 10000^(2k / d_model))
    PE(pos, 2k+1) = cos(pos / 10000^(2k / d_model))

    The table is precomputed once for `max_seq_len` positions and stored as a
    (1, max_seq_len, d_model) buffer so it moves with the module's device.
    """

    def __init__(self, d_model, max_seq_len=1000):
        super().__init__()
        self.d_model = d_model
        # BUG FIX: the old loop used exponent (2*i)/d_model where i already
        # stepped by 2 (i.e. 10000^(4k/d) instead of 10000^(2k/d)), and gave
        # the cos at i+1 a different frequency than its paired sin. The
        # standard encoding shares one frequency per sin/cos pair. Also
        # vectorized to replace the O(max_seq_len * d_model) Python loop.
        position = torch.arange(max_seq_len, dtype=torch.float).unsqueeze(1)
        # div_term[k] = 10000^(-2k / d_model) for pair index k.
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float)
            * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_seq_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        # For odd d_model there is one fewer cos column than sin column.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """x: (N, S, d_model) -> x * sqrt(d_model) + PE[:, :S].

        BUG FIX: the old torch.no_grad() wrapper silently cut gradient flow
        through the input scaling; the PE buffer needs no grad anyway.
        """
        seq_len = x.size(1)
        return x * math.sqrt(self.d_model) + self.pe[:, :seq_len]