train.py
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataset import pll_datst, coll, mono_datst
from preprocessing import load_data, tokenizer
from model2 import xlmb2b
from tqdm import tqdm
from os import path
from functools import partial
from nltk.translate.bleu_score import corpus_bleu
import multiprocessing as mp
from Globals import *
import argparse
parser = argparse.ArgumentParser(description= 'Train the Model')
parser.add_argument('--dataset_path')
parser.add_argument('--p', type=float)
parser.add_argument('--ksample', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--trfrmr_nlayers', type=int)
args = parser.parse_args()
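# Example invocation (a sketch only; the paths and hyperparameter values below
# are illustrative, not prescribed by this repo):
#   python train.py --dataset_path ./data --p 0.6 --ksample 3 --batch_size 16 --trfrmr_nlayers 6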
if path.exists(args.dataset_path + "/file_1.csv"):
    data_obj = load_data(load_=False, dataset_path=args.dataset_path)
else:
    data_obj = load_data(dataset_path=args.dataset_path)
df_prllel, df_en, df_de = data_obj.final_data()
pll_train_ds = pll_datst(df_prllel)
mono_train_ds_en = mono_datst(df_en)
mono_train_ds_de = mono_datst(df_de, lang='de')
vocab_size = tokenizer.vocab_size
b_sz = args.batch_size
batch_size = args.batch_size
d_model = 1024
model_ed = xlmb2b(trfrmr_nlayers=args.trfrmr_nlayers).double().to(device)
model_de = xlmb2b(trfrmr_nlayers=args.trfrmr_nlayers).double().to(device)
# Share a single XLM encoder between the two translation directions
del model_ed.xlm
model_ed.xlm = model_de.xlm
model_ed.p = args.p
model_de.p = args.p
model_ed.beam_size = args.ksample
model_de.beam_size = args.ksample
cpus = mp.cpu_count()
pll_train_loader = DataLoader(pll_train_ds,batch_size=b_sz, collate_fn = partial(coll, pll_dat = True), pin_memory=True, num_workers=cpus)
mono_train_loader_en = DataLoader(mono_train_ds_en, batch_size=b_sz, collate_fn = partial(coll, pll_dat =False), pin_memory=True, num_workers=cpus)
mono_train_loader_de = DataLoader(mono_train_ds_de, batch_size=b_sz, collate_fn = partial(coll, pll_dat =False), pin_memory=True, num_workers=cpus)
optimizer_ed = torch.optim.Adam(model_ed.parameters(), lr=0.01)
optimizer_de = torch.optim.Adam(model_de.parameters(), lr=0.01)
mseloss = nn.MSELoss()
cross_entropy_loss = nn.CrossEntropyLoss()
def calculate_bleu(ref, cand, weights=(0.25, 0.25, 0.25, 0.25)):
    """
    ref: (batch_size, seq_len, 1) tensor of reference token ids
    cand: (batch_size, seq_len, 1) tensor of candidate token ids
    """
    references = []
    candidates = []
    dict_ = tokenizer.decoder
    for i in range(ref.shape[0]):
        refs = []
        cands = []
        for j in range(ref[i].shape[0]):
            # decoder maps int ids to tokens, so cast the 1-element tensors to int
            refs.append(dict_[int(ref[i][j])])
            cands.append(dict_[int(cand[i][j])])
        references.append([refs])
        candidates.append(cands)
    return corpus_bleu(references, candidates, weights)
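# Usage sketch (illustrative; assumes `refs` and `preds` are LongTensors of
# token ids shaped (batch_size, seq_len, 1)):
#   score = calculate_bleu(refs, preds)                          # 4-gram BLEU
#   bigram_score = calculate_bleu(refs, preds, weights=(0.5, 0.5))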
def reshape_n_edit(probs):
    '''Returns probs with the all-NaN rows removed; those rows come from
    padding all sequences in a batch to the same length.'''
    y = probs.reshape(-1, vocab_size)
    return y[y == y].reshape(-1, vocab_size)  # y == y is False only at NaN entries
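# Illustration of the NaN-row filtering trick above (a toy example, not part of
# the training flow): the all-NaN row is dropped and the real row is kept.
#   t = torch.tensor([[0.1, 0.9], [float('nan'), float('nan')]])
#   t[t == t].reshape(-1, 2)   # -> tensor([[0.1000, 0.9000]])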
def assign_features(batch):
    # Mark real (non-pad) tokens with 1, so lengths holds true sequence lengths
    batch['X']['attention_mask'] = (batch['X']['input_ids'] != tokenizer.pad_token_id).float()
    batch['X']['lengths'] = batch['X']['attention_mask'].sum(dim=1).long()
    max_size = int(batch['X']['lengths'].max())
    bs = batch['X']['input_ids'].shape[0]
    batch['X']['position_ids'] = torch.arange(max_size, dtype=torch.long).repeat(bs, 1)
    # After the swap the batch holds translations, so flip the language ids
    if (batch['X']['langs'] == en_lang_id).sum() == 0:
        batch['X']['langs'] = torch.LongTensor([[en_lang_id] * max_size for _ in range(bs)])
    else:
        batch['X']['langs'] = torch.LongTensor([[de_lang_id] * max_size for _ in range(bs)])
    return batch
def swap(batch, sr_embd, tr_embd, pll=True):
    '''Replaces X with Y and input_ids with embeddings for pll data.
    For mono data, replaces input_ids with predicted tokens.'''
    if pll:
        z2 = batch['X']
        z = batch['X']['input_ids'].clone()
        z1 = batch['Y']['input_ids'].clone()
        batch['X'] = batch['Y']
        batch['Y'] = z2
        batch['X']['input_ids'] = tr_embd
        batch['Y']['input_ids'] = sr_embd
        return batch, z, z1
    else:
        batch1 = {}
        batch1['X'] = {}
        for k, v in batch['X'].items():
            batch1['X'][k] = v.clone()
        z = batch1['X']['input_ids']
        batch['X']['input_ids'] = tr_embd
        batch = assign_features(batch)
        return batch, z, batch1
def flip_masks(batch):
    batch['X']['attention_mask'] = (~(batch['X']['attention_mask'].bool())).float()
    batch['Y']['attention_mask'] = (~(batch['Y']['attention_mask'].bool())).float()
    return batch
def freeze_weights(model):
    for param in model.parameters():
        param.requires_grad = False
def unfreeze_weights(model):
    for param in model.parameters():
        param.requires_grad = True
def remove_pad_tokens(tensorr):
    j = tokenizer.pad_token_id
    return tensorr[tensorr != j]
def set_to_eval(model_lis, beam_size=3):
    for model in model_lis:
        model.eval()
        model.beam_size = beam_size
def send_to_gpu(batch, pll):
    lis = ['X', 'Y'] if pll else ['X']
    for elem in lis:
        for key, value in batch[elem].items():
            batch[elem][key] = value.to(device, non_blocking=True)
    return batch
def evaluate(model, i, beam_size=3):
    # NOTE: expects `model` to be a [forward, backward] pair and relies on
    # evaluation batches `c` and `d` being defined in the enclosing scope.
    set_to_eval(model, beam_size)
    print(str(i) + "th, Forward Model: ", model[0](c))
    print(str(i) + "th, Backward Model: ", model[1](d))
def synchronize():
    if torch.cuda.is_available():
        torch.cuda.synchronize()
def run(model_forward, model_backward, batch, optimizers, pll=True):
    '''One training step: translate with model_forward, then feed the result to
    model_backward and train on the round-trip (back-to-back) reconstruction loss.'''
    probs, sr_embd, tr_embd = model_forward(batch)
    if pll:
        loss_pll = cross_entropy_loss(reshape_n_edit(probs), remove_pad_tokens(batch['Y']['input_ids'].reshape(-1)))
    batch, a, b = swap(batch, sr_embd, tr_embd, pll)
    probs_, sr_embd_, tr_embd_ = model_backward(batch, True)
    loss_b2b = cross_entropy_loss(reshape_n_edit(probs_), remove_pad_tokens(a.reshape(-1)))
    loss = loss_pll + loss_b2b if pll else loss_b2b
    for optimizer in optimizers:
        optimizer.zero_grad()
    loss.backward()
    del probs_, sr_embd, sr_embd_, tr_embd, tr_embd_, probs
    synchronize()
    for optimizer in optimizers:
        optimizer.step()
    return a, b, loss
def check_thresholds(loss1, loss2, model_ed, model_de, epochs):
    global xlm_freezed
    if xlm_freezed and loss1 < thresh_for_xlm_weight_freeze and loss2 < thresh_for_xlm_weight_freeze:
        unfreeze_weights(model_ed.xlm)
        xlm_freezed = False
    elif not model_de.begin_prgrsiv_real_to_pred and loss1 < thresh_to_start_real_to_pred_prgrsiv and loss2 < thresh_to_start_real_to_pred_prgrsiv:
        model_de.begin_prgrsiv_real_to_pred = True
        model_ed.begin_prgrsiv_real_to_pred = True
        return
    elif model_de.begin_prgrsiv_xlm_to_plt and epochs > thresh_to_stop_xlm_to_plt_prgrsiv:
        model_de.begin_prgrsiv_xlm_to_plt = False
        model_ed.begin_prgrsiv_xlm_to_plt = False
def save_checkpoint(model_ed, model_de):
    torch.save(model_ed.state_dict(), '../b2b_wts/model_ed.pt')
    torch.save(model_de.state_dict(), '../b2b_wts/model_de.pt')
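# To resume training, the saved weights can be loaded back in the usual PyTorch
# way (a sketch; it assumes the '../b2b_wts/' directory and files exist):
#   model_ed.load_state_dict(torch.load('../b2b_wts/model_ed.pt'))
#   model_de.load_state_dict(torch.load('../b2b_wts/model_de.pt'))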
losses_epochs = {"pll" : [], "mono": []}
optimizers = [optimizer_de,optimizer_ed]
freeze_weights(model_de.xlm)
xlm_freezed = True
for epoch in tqdm(range(num_epochs)):
    print(epoch)
    model_ed.pll_dat = True
    model_de.pll_dat = True
    losses = [[], []]
    for i, batch in enumerate(pll_train_loader):
        batch = send_to_gpu(batch, pll=True)
        # First pass: model_ed forward, model_de for the back-to-back reconstruction
        batch['Y']['input_ids'], batch['X']['input_ids'], loss1 = run(model_ed, model_de, batch, optimizers)
        losses[0].append(loss1.item())
        if i >= 2000 and i % 2000 == 0:
            print(i, sum(losses[0][i-2000:i]) / 2000)
        del loss1
        synchronize()
        # Second pass trains the reverse direction on the swapped batch
        batch = flip_masks(batch)
        _, _, loss2 = run(model_de, model_ed, batch, optimizers)
        losses[1].append(loss2.item())
        if i >= 2000 and i % 2000 == 0:
            print(i, sum(losses[1][i-2000:i]) / 2000)
        del loss2
        synchronize()
        check_thresholds(losses[0][-1], losses[1][-1], model_ed, model_de, epoch)
    save_checkpoint(model_ed, model_de)
    losses_epochs['pll'].append([sum(losses[0]) / len(losses[0]), sum(losses[1]) / len(losses[1])])
    # Train on monolingual data once the parallel losses are sufficiently low:
    if losses_epochs['pll'][-1][0] < thresh_for_mono_data or losses_epochs['pll'][-1][1] < thresh_for_mono_data:
        print("Going for Monolingual Training")
        model_ed.pll_dat = False
        model_de.pll_dat = False
        losses = [[], []]
        for i, batch in enumerate(mono_train_loader_en):
            batch = send_to_gpu(batch, pll=False)
            _, _, loss1 = run(model_ed, model_de, batch, optimizers, pll=False)
            losses[0].append(loss1.item())
            del loss1
            synchronize()
        for i, batch in enumerate(mono_train_loader_de):
            batch = send_to_gpu(batch, pll=False)
            _, _, loss2 = run(model_de, model_ed, batch, optimizers, pll=False)
            losses[1].append(loss2.item())
            del loss2
            synchronize()
        losses_epochs['mono'].append([sum(losses[0]) / len(losses[0]), sum(losses[1]) / len(losses[1])])