# eval_semseg.py
# This code is referenced from
# https://github.com/facebookresearch/astmt/
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# License: Attribution-NonCommercial 4.0 International
import warnings
import cv2
import os.path
import glob
import json
import numpy as np
import torch
from PIL import Image

VOC_CATEGORY_NAMES = ['background',
                      'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
                      'bus', 'car', 'cat', 'chair', 'cow',
                      'diningtable', 'dog', 'horse', 'motorbike', 'person',
                      'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']

NYU_CATEGORY_NAMES = ['wall', 'floor', 'cabinet', 'bed', 'chair',
                      'sofa', 'table', 'door', 'window', 'bookshelf',
                      'picture', 'counter', 'blinds', 'desk', 'shelves',
                      'curtain', 'dresser', 'pillow', 'mirror', 'floor mat',
                      'clothes', 'ceiling', 'books', 'refridgerator', 'television',
                      'paper', 'towel', 'shower curtain', 'box', 'whiteboard',
                      'person', 'night stand', 'toilet', 'sink', 'lamp',
                      'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop']

KITTI_CATEGORY_NAMES = ['road', 'non road']
# KITTI_CATEGORY_NAMES = ['background', 'road', 'non road']

CITYSCAPES_CATEGORY_NAMES = ['road', 'non road']
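
# Note: prediction PNGs and ground-truth maps are assumed to encode each pixel as the
# integer index of its category in the lists above (index 0 is 'background' when the
# dataset uses one); the per-class comparisons below, e.g. (gt == i_part), rely on this.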


def eval_semseg(loader, folder, n_classes=20, has_bg=True):
    """Compute per-class Jaccard (IoU) and mIoU for the prediction PNGs stored in `folder`."""
    n_classes = n_classes + int(has_bg)

    # Iterate
    tp = [0] * n_classes
    fp = [0] * n_classes
    fn = [0] * n_classes
    for i, sample in enumerate(loader):
        if i % 20 == 0:
            print('Evaluating: {} of {} objects'.format(i, len(loader)))

        # Load result
        # filename = os.path.join(folder, sample['meta']['image'])
        # Build the prediction filename from the image name (KITTI road naming,
        # e.g. 'um_000000' -> 'um_road_000000.png')
        fname = sample['meta']['image']
        im_name_splits = fname.split('_')
        filename = os.path.join(folder, im_name_splits[0] + '_road_' + im_name_splits[1] + '.png')
        mask = np.array(Image.open(filename)).astype(np.float32)

        gt = sample['semseg']
        # gt = gt[0,:,:]
        # valid = (gt != 255)

        if mask.shape != gt.shape:
            warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
            mask = cv2.resize(mask, gt.shape[::-1], interpolation=cv2.INTER_NEAREST)

        # TP, FP, and FN evaluation
        for i_part in range(0, n_classes):
            tmp_gt = (gt == i_part)
            tmp_pred = (mask == i_part)
            # tp[i_part] += np.sum(tmp_gt & tmp_pred & valid)
            # fp[i_part] += np.sum(~tmp_gt & tmp_pred & valid)
            # fn[i_part] += np.sum(tmp_gt & ~tmp_pred & valid)
            tp[i_part] += np.sum(tmp_gt & tmp_pred)
            fp[i_part] += np.sum(~tmp_gt & tmp_pred)
            fn[i_part] += np.sum(tmp_gt & ~tmp_pred)

    # Per-class Jaccard index: IoU = TP / (TP + FP + FN)
    jac = [0] * n_classes
    for i_part in range(0, n_classes):
        jac[i_part] = float(tp[i_part]) / max(float(tp[i_part] + fp[i_part] + fn[i_part]), 1e-8)

    # Write results
    eval_result = dict()
    eval_result['jaccards_all_categs'] = jac
    eval_result['mIoU'] = np.mean(jac)
    return eval_result
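
# Minimal usage sketch (not from the original astmt code): `loader` can be any iterable of
# dataset samples shaped like {'meta': {'image': 'um_000000'}, 'semseg': <H x W array of class ids>},
# and the second argument points at the folder of predicted PNGs. The sample layout is an
# assumption inferred from how eval_semseg indexes sample['meta']['image'] and sample['semseg'].
#
#   results = eval_semseg(val_db, '/path/to/save_dir/semseg', n_classes=2, has_bg=False)
#   print(results['mIoU'], results['jaccards_all_categs'])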


class SemsegMeter(object):
    """Accumulate per-class TP/FP/FN over batches and report Jaccard (IoU) scores."""

    def __init__(self, database):
        if database == 'PASCALContext':
            n_classes = 20
            cat_names = VOC_CATEGORY_NAMES
            has_bg = True
        elif database == 'NYUD':
            n_classes = 40
            cat_names = NYU_CATEGORY_NAMES
            has_bg = False
        elif database == 'KITTI':
            n_classes = 2
            cat_names = KITTI_CATEGORY_NAMES
            has_bg = False
        elif database == 'CITYSCAPES':
            n_classes = 2
            cat_names = CITYSCAPES_CATEGORY_NAMES
            has_bg = False
        else:
            raise NotImplementedError

        self.n_classes = n_classes + int(has_bg)
        self.cat_names = cat_names
        self.tp = [0] * self.n_classes
        self.fp = [0] * self.n_classes
        self.fn = [0] * self.n_classes

    @torch.no_grad()
    def update(self, pred, gt):
        pred = pred.squeeze()
        gt = gt.squeeze()
        # valid = (gt != 255)

        for i_part in range(0, self.n_classes):
            tmp_gt = (gt == i_part)
            tmp_pred = (pred == i_part)
            # self.tp[i_part] += torch.sum(tmp_gt & tmp_pred & valid).item()
            # self.fp[i_part] += torch.sum(~tmp_gt & tmp_pred & valid).item()
            # self.fn[i_part] += torch.sum(tmp_gt & ~tmp_pred & valid).item()
            self.tp[i_part] += torch.sum(tmp_gt & tmp_pred).item()
            self.fp[i_part] += torch.sum(~tmp_gt & tmp_pred).item()
            self.fn[i_part] += torch.sum(tmp_gt & ~tmp_pred).item()

    def reset(self):
        self.tp = [0] * self.n_classes
        self.fp = [0] * self.n_classes
        self.fn = [0] * self.n_classes

    def get_score(self, verbose=True):
        # Per-class Jaccard index: IoU = TP / (TP + FP + FN)
        jac = [0] * self.n_classes
        for i_part in range(self.n_classes):
            jac[i_part] = float(self.tp[i_part]) / max(float(self.tp[i_part] + self.fp[i_part] + self.fn[i_part]), 1e-8)

        eval_result = dict()
        eval_result['jaccards_all_categs'] = jac
        eval_result['mIoU'] = np.mean(jac)

        # if verbose:
        #     print('\nSemantic Segmentation mIoU: {0:.4f}\n'.format(100 * eval_result['mIoU']))
        #     class_IoU = eval_result['jaccards_all_categs']
        #     for i in range(len(class_IoU)):
        #         spaces = ''
        #         for j in range(0, 20 - len(self.cat_names[i])):
        #             spaces += ' '
        #         print('{0:s}{1:s}{2:.4f}'.format(self.cat_names[i], spaces, 100 * class_IoU[i]))

        return eval_result
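
# Minimal usage sketch (not from the original astmt code) for SemsegMeter during validation;
# `model` and `val_loader` are placeholders, and predictions are assumed to be logits of
# shape [B, n_classes, H, W]:
#
#   meter = SemsegMeter('NYUD')
#   for batch in val_loader:
#       output = model(batch['image'])      # hypothetical forward pass
#       pred = output.argmax(dim=1)         # per-pixel class ids
#       meter.update(pred, batch['semseg'])
#   scores = meter.get_score()
#   print('mIoU: {:.4f}'.format(100 * scores['mIoU']))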


def eval_semseg_predictions(database, save_dir, overfit=False):
    """ Evaluate the segmentation maps that are stored in the save dir """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        n_classes = 20
        cat_names = VOC_CATEGORY_NAMES
        has_bg = True
        gt_set = 'val'
        db = PASCALContext(split=gt_set, do_edge=False, do_human_parts=False, do_semseg=True,
                           do_normals=False, overfit=overfit)
    elif database == 'NYUD':
        from data.nyud import NYUD_MT
        n_classes = 40
        cat_names = NYU_CATEGORY_NAMES
        has_bg = False
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_semseg=True, overfit=overfit)
    elif database == 'KITTI':
        from data.kitti_road_v02 import KITTIRoad
        n_classes = 2
        cat_names = KITTI_CATEGORY_NAMES
        has_bg = False
        gt_set = 'val'
        db = KITTIRoad(split=gt_set, do_semseg=True, overfit=overfit)
    elif database == 'CITYSCAPES':
        from data.cityscapes import CityScapes
        n_classes = 2
        cat_names = CITYSCAPES_CATEGORY_NAMES
        has_bg = False
        gt_set = 'val'
        db = CityScapes(split=gt_set, do_semseg=True, overfit=overfit)
    else:
        raise NotImplementedError

    base_name = database + '_' + 'test' + '_semseg'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (semseg)')
    eval_results = eval_semseg(db, os.path.join(save_dir, 'semseg'), n_classes=n_classes, has_bg=has_bg)
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results
    class_IoU = eval_results['jaccards_all_categs']
    mIoU = eval_results['mIoU']

    print('\nSemantic Segmentation mIoU: {0:.4f}\n'.format(100 * mIoU))
    for i in range(len(class_IoU)):
        spaces = ''
        for j in range(0, 15 - len(cat_names[i])):
            spaces += ' '
        print('{0:s}{1:s}{2:.4f}'.format(cat_names[i], spaces, 100 * class_IoU[i]))

    return eval_results
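

# Minimal CLI sketch (an addition, not part of the referenced astmt code): it simply wires
# eval_semseg_predictions() to argparse so saved predictions can be scored from the command
# line; the default database choice and the argument names are assumptions.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Evaluate saved semantic segmentation predictions')
    parser.add_argument('--database', default='KITTI',
                        choices=['PASCALContext', 'NYUD', 'KITTI', 'CITYSCAPES'])
    parser.add_argument('--save_dir', required=True,
                        help='directory that contains the semseg/ prediction PNGs')
    parser.add_argument('--overfit', action='store_true')
    args = parser.parse_args()

    eval_semseg_predictions(args.database, args.save_dir, overfit=args.overfit)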