# utils.py
import csv
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm

class TrainLogger():
    '''
    Log the training history.
    Usage: logger = TrainLogger("path_to_log.csv")
           logger.log(loss_train, loss_test)
    This class automatically appends to the csv file unless overwrite is set to True.
    '''
    def __init__(self, dst_path, overwrite=False):
        self.path = dst_path
        if overwrite:
            # Start a fresh file; the header row is only written in this case.
            with open(self.path, 'w') as f:
                writer = csv.writer(f)
                header = ["loss_train", "loss_test"]
                writer.writerow(header)

    def log(self, loss_train, loss_test):
        with open(self.path, 'a') as f:
            writer = csv.writer(f)
            row = [loss_train, loss_test]
            writer.writerow(row)

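# A minimal usage sketch for TrainLogger; the file name is illustrative.
# Note that the header row is only written when overwrite=True.
#   logger = TrainLogger("history.csv", overwrite=True)
#   logger.log(0.52, 0.61)  # appends one row per call, e.g. once per epoch
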
class Predictor():
    '''
    Generate segmentation images.
    Usage: predictor = Predictor(model, weight_path, device, dst_dir, dataloader)
           predictor.infer()
    Note: `model` is the model class itself; it is instantiated here and the
    weights at `weight_path` are loaded into it.
    '''
    def __init__(self, model, weight_path, device, dst_dir, dataloader):
        self.model = model()
        self.model.load_state_dict(torch.load(weight_path))
        self.model = self.model.to(device)
        self.dst_dir = dst_dir
        self.dataloader = dataloader
        self.device = device
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)

    def infer(self):
        # eval mode so dropout/batchnorm layers behave deterministically
        self.model.eval()
        with torch.no_grad():
            for data in tqdm(self.dataloader):
                x, name, size = data
                size = (size[0].item(), size[1].item())
                path = os.path.join(self.dst_dir, name[0])
                x = x.to(self.device)
                fuse, s1, s2, s3, s4, s5 = self.model(x)
                # Rescale the fused side output to [0, 255] before saving as 8-bit.
                maxv = fuse.max().item()
                fuse = fuse * (255 / maxv) if maxv != 0 else fuse
                img = fuse.squeeze().cpu().numpy()
                img = Image.fromarray(img).convert("L")
                img = img.resize(size)
                dirname = os.path.dirname(path)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
                img.save(path)

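# A usage sketch for Predictor. `HED` and `infer_loader` are placeholders:
# the first argument must be the model class (it is instantiated in __init__),
# and the dataloader is assumed to yield (image, filename, original_size)
# batches of size 1, matching the unpacking in infer().
#   predictor = Predictor(HED, "hed.model", torch.device("cuda"), "./results", infer_loader)
#   predictor.infer()
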
def save_model(model, path):
    torch.save(model.state_dict(), path)

def test(model, device, test_loader):
    total_loss = 0
    with torch.no_grad():
        for data in test_loader:
            x, y = data
            x = x.to(device)
            y = y.to(device)
            fuse, s1, s2, s3, s4, s5 = model(x)
            loss = model.loss(fuse, s1, s2, s3, s4, s5, y)
            total_loss += loss.item()
    loss = total_loss / len(test_loader.dataset)
    return loss

def train(model, device, train_loader, test_loader, optimizer, n_epochs,
          scheduler=None, done_epoch=0, prefix="", path_checkpoint="./checkpoint"):
    '''
    Run the training loop.
    Args:
        done_epoch (int): if you resume training, set the number of epochs
            already completed.
        prefix (str): prefix used to name the auto-saved files.
        path_checkpoint (str): parameters are automatically saved to this
            location every 10 epochs.
    For every epoch, parameters are saved as "hed.model".
    '''
    if not os.path.exists(path_checkpoint):
        os.makedirs(path_checkpoint)
    if done_epoch >= n_epochs:
        print("epochs exceeded {0}".format(n_epochs))
        return
    logger = TrainLogger("{0}-history.csv".format(prefix))
    # Epochs are 1-indexed; run up to and including n_epochs.
    for epoch in range(done_epoch + 1, n_epochs + 1):
        train_total_loss = 0
        for data in tqdm(train_loader):
            x, y = data
            x = x.to(device)
            y = y.to(device)
            optimizer.zero_grad()
            fuse, s1, s2, s3, s4, s5 = model(x)
            loss = model.loss(fuse, s1, s2, s3, s4, s5, y)
            train_total_loss += loss.item()
            loss.backward()
            optimizer.step()
        if scheduler is not None:
            scheduler.step()
        train_loss = train_total_loss / len(train_loader.dataset)
        test_loss = test(model, device, test_loader)
        logger.log(train_loss, test_loss)
        save_model(model, "hed.model")
        if epoch % 10 == 0:
            name = "{0}-ep{1}.model".format(prefix, epoch)
            save_model(model, os.path.join(path_checkpoint, name))

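# A usage sketch for train(). `HED` and the loaders are placeholders; any
# torch optimizer (and optionally a scheduler) works, for example:
#   model = HED().to(device)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
#   train(model, device, train_loader, test_loader, optimizer, n_epochs=40,
#         scheduler=scheduler, prefix="hed")
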
def plot_loss(train_loss, test_loss):
    x = np.arange(len(train_loss))
    plt.plot(x, train_loss)
    plt.plot(x, test_loss)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.legend(["train", "test"])
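
# A usage sketch: plot the history written by TrainLogger during train().
# The file name is illustrative and must match the prefix passed to train();
# the header row is skipped only if present (it is written when overwrite=True).
#   with open("hed-history.csv") as f:
#       rows = [r for r in csv.reader(f) if r and r[0] != "loss_train"]
#   plot_loss([float(r[0]) for r in rows], [float(r[1]) for r in rows])
#   plt.show()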