# demo_semantic_segmentation.py
import os, csv, torch, numpy, scipy.io, PIL.Image, torchvision.transforms
import matplotlib.pyplot as plt
from mit_semseg.models import ModelBuilder, SegmentationModule
from mit_semseg.utils import colorEncode
from lib.utils.demo_utils import download_segmentation
# Color palette and class names for the 150 ADE20K scene-parsing categories.
colors = scipy.io.loadmat('data/segm_data/color150.mat')['colors']
names = {}
with open('data/segm_data/object150_info.csv') as f:
    reader = csv.reader(f)
    next(reader)  # skip the CSV header row
    for row in reader:
        # Column 0 is the (1-based) class index; column 5 lists names separated by ';'.
        names[int(row[0])] = row[5].split(";")[0]
def visualize_result(img, pred, index=None):
    # Filter the prediction down to a single class if requested.
    if index is not None:
        pred = pred.copy()
        pred[pred != index] = -1
        print(f'{names[index+1]}:')

    # Colorize the prediction with the ADE20K palette.
    pred_color = colorEncode(pred, colors).astype(numpy.uint8)

    # Show the original image and the colorized prediction side by side.
    im_vis = numpy.concatenate((img, pred_color), axis=1)
    plt.imshow(PIL.Image.fromarray(im_vis))
    plt.show()
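# Example usage (hypothetical class index, not part of the original script):
# once `pred` is computed below, visualize_result(img_original, pred, 0) would
# display only the pixels assigned class 0, whose readable name is names[1]
# (pred values are 0-based, while the CSV indices stored in `names` are 1-based).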
# Download pretrained encoder/decoder weights and demo data if not already present.
download_segmentation()

# Network builders: ResNet-101 (dilated) encoder + PPM-deepsup decoder,
# pretrained on the ADE20K scene-parsing dataset (150 classes).
net_encoder = ModelBuilder.build_encoder(
    arch='resnet101dilated',
    fc_dim=2048,
    weights='data/segm_data/encoder_epoch_25.pth')
net_decoder = ModelBuilder.build_decoder(
    arch='ppm_deepsup',
    fc_dim=2048,
    num_class=150,
    weights='data/segm_data/decoder_epoch_25.pth',
    use_softmax=True)

crit = torch.nn.NLLLoss(ignore_index=-1)
segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
segmentation_module.eval()
segmentation_module.cuda()
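# NOTE: this demo assumes a CUDA-capable GPU. A minimal CPU fallback (an
# assumption, not part of the original script) would move the module and the
# input batch to a chosen device, e.g.:
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     segmentation_module.to(device)
#     singleton_batch = {'img_data': img_data[None].to(device)}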
# Load and normalize one image as a singleton tensor batch.
pil_to_tensor = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(
        mean=[0.485, 0.456, 0.406],  # ImageNet per-channel RGB mean
        std=[0.229, 0.224, 0.225])   # ImageNet per-channel RGB std
])

pil_image = PIL.Image.open('./data/test_images/segmentation_example.jpg').convert('RGB')
img_original = numpy.array(pil_image)
img_data = pil_to_tensor(pil_image)

# The module expects a dict with a batched image tensor; segSize sets the
# output resolution (here, the full input resolution).
singleton_batch = {'img_data': img_data[None].cuda()}
output_size = img_data.shape[1:]
# Run the segmentation at the highest resolution.
with torch.no_grad():
    scores = segmentation_module(singleton_batch, segSize=output_size)

# Take the highest-scoring class at each pixel.
_, pred = torch.max(scores, dim=1)
pred = pred.cpu()[0].numpy()
visualize_result(img_original, pred)
# Visualize the most frequent predicted classes individually.
predicted_classes = numpy.bincount(pred.flatten()).argsort()[::-1]
for c in predicted_classes[:15]:
    visualize_result(img_original, pred, c)
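# If running headless (no display), one could save the side-by-side visualization
# instead of calling plt.show(); a sketch (the output filename is an assumption):
#     plt.imsave('segmentation_result.png',
#                numpy.concatenate((img_original,
#                                   colorEncode(pred, colors).astype(numpy.uint8)),
#                                  axis=1))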