-
Notifications
You must be signed in to change notification settings - Fork 32
/
train.py
executable file
·89 lines (71 loc) · 2.67 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import torch
import numpy as np
import random
from data.Dataset import SingleImageDataset
from models.model import Model
from util.losses import LossG
from util.util import get_scheduler, get_optimizer, save_result
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
# Pick the compute device once at import time: CUDA when available, else CPU.
# Training inputs and the generator's inference input are moved here below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train_model(dataroot, callback=None, config_path="conf/default/config.yaml"):
    """Train the single-image generator defined by a YAML config.

    Args:
        dataroot: Optional path to the training data root. When not None it
            overrides the ``dataroot`` entry loaded from the config file.
        callback: Optional callable invoked with the generated image tensor
            every ``log_images_freq`` epochs, after the image is saved.
        config_path: Path to the YAML configuration file. Defaults to the
            stock project config so existing callers are unaffected.
    """
    with open(config_path, "r") as f:
        cfg = yaml.safe_load(f)

    if dataroot is not None:
        cfg['dataroot'] = dataroot

    # Seed all RNG sources; a config seed of -1 means "draw a random seed".
    seed = cfg['seed']
    if seed == -1:
        seed = np.random.randint(2 ** 32 - 1, dtype=np.int64)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    print(f'running with seed: {seed}.')

    # Dataset, model, loss, and optimisation setup.
    dataset = SingleImageDataset(cfg)
    model = Model(cfg)
    criterion = LossG(cfg)
    optimizer = get_optimizer(cfg, model.netG.parameters())
    scheduler = get_scheduler(optimizer,
                              lr_policy=cfg['scheduler_policy'],
                              n_epochs=cfg['n_epochs'],
                              n_epochs_decay=cfg['scheduler_n_epochs_decay'],
                              lr_decay_iters=cfg['scheduler_lr_decay_iters'])

    with tqdm(range(1, cfg['n_epochs'] + 1)) as tepoch:
        for epoch in tepoch:
            # One training sample per epoch; the dataset presumably
            # re-samples crops/augmentations on each access — single-image
            # training loop. (TODO confirm against SingleImageDataset.)
            inputs = dataset[0]
            for key in inputs:
                inputs[key] = inputs[key].to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            losses = criterion(outputs, inputs)
            loss_G = losses['loss']

            # Copy before adding log fields so the dict returned by the
            # criterion is not mutated as a side effect (original aliased it).
            log_data = dict(losses)
            log_data['epoch'] = epoch
            # Current (pre-scheduler-step) learning rate, for display only.
            log_data["lr"] = optimizer.param_groups[0]['lr']
            tepoch.set_description(f"Epoch {log_data['epoch']}")
            tepoch.set_postfix(loss=log_data["loss"].item(), lr=log_data["lr"])

            # Periodically render and persist the full generated image.
            if epoch % cfg['log_images_freq'] == 0:
                img_A = dataset.get_A().to(device)
                with torch.no_grad():
                    output = model.netG(img_A)
                save_result(output[0], cfg['dataroot'])
                if callback is not None:
                    callback(output[0])

            loss_G.backward()
            optimizer.step()
            scheduler.step()
if __name__ == '__main__':
    # CLI entry point: accept an optional data-root override and train.
    cli = ArgumentParser()
    cli.add_argument("--dataroot", type=str)
    cli_args = cli.parse_args()
    train_model(cli_args.dataroot)