evaluate_ood.py
import os
import torch
import torch.nn.functional as F
import numpy as np
import argparse
import pickle
import matplotlib.image as mpimg
import albumentations as A
from pathlib import Path
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets.cityscapes import Cityscapes
from datasets.bdd100k import BDD100KSeg
from datasets.road_anomaly import RoadAnomaly
from datasets.fishyscapes import FishyscapesLAF, FishyscapesStatic
from datasets.lost_and_found import LostAndFound
from train_net import Trainer, setup
from detectron2.checkpoint import DetectionCheckpointer
from pprint import pprint
from support import get_datasets, OODEvaluator
from easydict import EasyDict as edict
parser = argparse.ArgumentParser(description='OOD Evaluation')
parser.add_argument('--batch_size', type=int, default=1,
                    help="batch size used in evaluation")
parser.add_argument('--num_workers', type=int, default=8,
                    help="number of worker threads used in the data loader")
parser.add_argument('--device', type=str, default='cuda',
                    help="cpu or cuda, the device used for evaluation")
parser.add_argument('--out_path', type=str, default='results',
                    help="output folder in which the results are saved as a pickle file")
# note: with type=bool, any non-empty string passed on the command line is parsed as True
parser.add_argument('--verbose', type=bool, default=True,
                    help="if True, the records are printed every time they are saved")
parser.add_argument('--datasets_folder', type=str, default='./datasets/',
                    help="path to the folder that contains all datasets used for evaluation")
parser.add_argument('--models_folder', type=str, default='ckpts/',
                    help="path to the folder that contains the models to be evaluated")
parser.add_argument('--model_mode', type=str, default='all',
                    help="""One of [all, selective]. Defines which models to evaluate. The default is all,
                    which evaluates every model found in the models folder. With selective, only the
                    models named in --selected_models are evaluated.""")
parser.add_argument("--selected_models", nargs="*", type=str, default=[],
                    help="names of the models to be evaluated; these should be directory names inside the models folder")
parser.add_argument('--dataset_mode', type=str, default='all',
                    help="""One of [all, selective]. Defines which datasets to evaluate on. The default is all,
                    which evaluates on every available dataset. With selective, only the datasets named
                    in --selected_datasets are used.
                    Available datasets are: [
                        road_anomaly,
                        fishyscapes_laf,
                    ]
                    """)
parser.add_argument("--selected_datasets", nargs="*", type=str, default=[],
                    help="""Names of the datasets to be evaluated.
                    Available datasets are: [
                        road_anomaly,
                        fishyscapes_laf,
                    ]
                    """)
parser.add_argument("--score_func", type=str, default="rba", choices=["rba", "pebal", "dense_hybrid"],
                    help="outlier scoring function to be used in evaluation")
args = parser.parse_args()
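
# Example invocation (all paths and dataset names below are illustrative; adjust them to your setup):
#   python evaluate_ood.py --models_folder ckpts/ --datasets_folder ./datasets/ \
#       --dataset_mode selective --selected_datasets road_anomaly fishyscapes_laf \
#       --score_func rba --out_path results
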
DATASETS = get_datasets(args.datasets_folder)
dataset_group = [(name, dataset) for (name, dataset) in DATASETS.items()]

# filter the dataset group according to the chosen option
if args.dataset_mode == 'selective':
    dataset_group = [g for g in dataset_group if g[0]
                     in args.selected_datasets]
    if len(dataset_group) == 0:
        raise ValueError(
            "Selective mode is chosen but the number of selected datasets is 0")
else:
    dataset_group = [g for g in dataset_group if g[0] in [
        'road_anomaly', 'fishyscapes_laf', 'fs_static',
        'road_anomaly_21', 'road_obstacles',
        'cs_synth', 'idd_synth', 'acdc_synth'
    ]]

print("Datasets to be evaluated:")
for name, dataset in dataset_group:
    print(name, len(dataset), 'images')
print("-----------------------")

# Dictionary for saving the results.
# records will be a nested dictionary with the following hierarchy:
# - model_name:
#     - dataset_name:
#         - metric_name:
#             - mean
#             - std
#             - value: the value computed without bootstrapping
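# For example, a record could be accessed as records['some_model']['road_anomaly']['AUPRC']['mean'],
# where 'some_model' and 'AUPRC' are illustrative; the actual metric keys come from OODEvaluator.evaluate_ood.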

# Device used for computation
if args.device == 'cuda' and (not torch.cuda.is_available()):
    print("Warning: CUDA was requested but is not available; the CPU will be used instead.")
    args.device = 'cpu'
DEVICE = torch.device(args.device)


def get_model(config_path, model_path):
    """
    Creates a Mask2Former model given a config path and a checkpoint path.
    """
    # local argument namespace mimicking the CLI arguments expected by detectron2's setup()
    model_args = edict({'config_file': config_path, 'eval-only': True, 'opts': [
        "OUTPUT_DIR", "output/",
    ]})
    config = setup(model_args)
    model = Trainer.build_model(config)
    DetectionCheckpointer(model, save_dir=config.OUTPUT_DIR).resume_or_load(
        model_path, resume=False
    )
    model.to(DEVICE)
    model.eval()
    return model


def get_logits(model, x, **kwargs):
    """
    Extracts the logits for a single image from a Mask2Former model. Currently works only for a single image.
    Expected input:
        - x: torch.Tensor of shape (1, 3, H, W)
    Expected output:
        - logits (torch.Tensor) of shape (1, 19, H, W)
    """
    with torch.no_grad():
        out = model([{"image": x[0].to(DEVICE)}])
        return out[0]['sem_seg'].unsqueeze(0)


def get_RbA(model, x, **kwargs):
    with torch.no_grad():
        out = model([{"image": x[0].to(DEVICE)}])
        logits = out[0]['sem_seg']
    return -logits.tanh().sum(dim=0)


def get_energy(model, x, **kwargs):
    with torch.no_grad():
        out = model([{"image": x[0].to(DEVICE)}])
        logits = out[0]['sem_seg']
    return -torch.logsumexp(logits, dim=0)


def get_densehybrid_score(model, x, **kwargs):
    with torch.no_grad():
        out, ood_pred = model([{"image": x[0].to(DEVICE)}], return_ood_pred=True)
        logits = out[0]['sem_seg']
    out = F.softmax(ood_pred, dim=1)
    p1 = torch.logsumexp(logits, dim=0)
    p2 = out[:, 1]  # p(~din|x), i.e. the predicted probability of "not in-distribution"
    probs = (-p1) + (p2 + 1e-9).log()
    conf_probs = probs
    return conf_probs
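
# The three scoring functions above map onto the --score_func choices in run_evaluations below
# (in each case a higher score is intended to mean "more likely out-of-distribution"):
#   rba          -> get_RbA:               -sum_k tanh(logit_k)
#   pebal        -> get_energy:            -logsumexp_k(logit_k)   (a free-energy style score)
#   dense_hybrid -> get_densehybrid_score: log p(~din|x) - logsumexp_k(logit_k)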


def save_dict(d, name):
    """
    Save the records under args.out_path/name as a pickle file.
    Also print them to the console if args.verbose is True.
    """
    if args.verbose:
        pprint(d)
    store_path = os.path.join(args.out_path, name)
    Path(store_path).mkdir(exist_ok=True, parents=True)
    with open(os.path.join(store_path, 'results.pkl'), 'wb') as f:
        pickle.dump(d, f)


def current_result_exists(model_name):
    """
    Check whether results for this model already exist under args.out_path.
    """
    store_path = os.path.join(args.out_path, model_name)
    return os.path.exists(os.path.join(store_path, 'results.pkl'))


def run_evaluations(model, dataset, model_name, dataset_name):
    """
    Run the OOD evaluation of a single model on a single dataset and return the resulting metrics.
    """
    score_func = None
    if args.score_func == "rba":
        score_func = get_RbA
    elif args.score_func == "pebal":
        score_func = get_energy
    elif args.score_func == "dense_hybrid":
        score_func = get_densehybrid_score

    evaluator = OODEvaluator(model, get_logits, score_func)
    loader = DataLoader(
        dataset, shuffle=False, batch_size=args.batch_size, num_workers=args.num_workers)
    anomaly_score, ood_gts = evaluator.compute_anomaly_scores(
        loader=loader,
        device=DEVICE,
        return_preds=False,
        upper_limit=1300
    )
    metrics = evaluator.evaluate_ood(
        anomaly_score=anomaly_score,
        ood_gts=ood_gts,
        verbose=False
    )
    return metrics


def main():
    # The name of every directory inside args.models_folder is expected to be a model name.
    # Inside each model's folder there should be two files (extra files are ignored):
    # config.yaml and either model_final.pth or model_final.pkl.
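    # For example (illustrative layout; "my_model" is a placeholder name):
    #   ckpts/
    #     my_model/
    #       config.yaml
    #       model_final.pth   (or model_final.pkl)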
    models_list = os.listdir(args.models_folder)
    models_list = [m for m in models_list if os.path.isdir(
        os.path.join(args.models_folder, m))]
    print(args.models_folder)

    if args.model_mode == 'selective':
        models_list = [m for m in models_list if m in args.selected_models]

    if len(models_list) == 0:
        raise ValueError(
            "Number of models chosen is 0, either the models folder is empty or no models were selected")

    print("Evaluating the following models:")
    for m in models_list:
        print(m)
    print("-----------------------")

    for model_name in models_list:
        experiment_path = os.path.join(args.models_folder, model_name)
        results = edict()
        config_path = os.path.join(experiment_path, 'config.yaml')
        model_path = os.path.join(experiment_path, 'model_final.pth')

        if current_result_exists(model_name):
            print(f"Skipping {model_name} because results already exist; to re-run, delete its results.pkl file")
            continue

        if not os.path.exists(model_path):
            # fall back to a .pkl checkpoint under the hardcoded model_logs directory
            model_path = os.path.join(
                'model_logs', model_name, 'model_final.pkl')
            if not os.path.exists(model_path):
                print("Model path does not exist, skipping")
                continue

        model = get_model(config_path=config_path, model_path=model_path)
        for dataset_name, dataset in dataset_group:
            if dataset_name not in results:
                results[dataset_name] = edict()
            results[dataset_name] = run_evaluations(model, dataset, model_name, dataset_name)
        save_dict(results, model_name)


if __name__ == '__main__':
    main()