eval.py
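"""OPE evaluation of tracking results on the OTB, UAV123, and LaSOT benchmarks.

Computes the success and precision metrics for every tracker whose result
directory matches the given prefix (normalized precision is added for LaSOT
and UAV).
"""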
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from toolkit.datasets import OTBDataset, UAVDataset, LaSOTDataset
from toolkit.evaluation import OPEBenchmark

parser = argparse.ArgumentParser(description='tracking evaluation')
parser.add_argument('--tracker_path', '-p', type=str, default='./results',
                    help='path to the tracker result directory')
parser.add_argument('--dataset', '-d', type=str, default='UAV123',
                    help='dataset name (OTB*, UAV*, or LaSOT)')
parser.add_argument('--num', '-n', default=1, type=int,
                    help='number of worker processes for evaluation')
parser.add_argument('--tracker_prefix', '-t', default='snapshot',
                    type=str, help='prefix of the tracker names to evaluate')
parser.add_argument('--show_video_level', '-s', action='store_true',
                    default=False, help='also print per-video results')
args = parser.parse_args()
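
# Example invocation (values are illustrative); results are expected under
# <tracker_path>/<dataset>/<tracker_prefix>*:
#   python eval.py --dataset UAV123 --num 4 --tracker_prefix snapshot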

def main():
    # Discover tracker result directories matching the prefix.
    tracker_dir = os.path.join(args.tracker_path, args.dataset)
    trackers = glob(os.path.join(tracker_dir, args.tracker_prefix + '*'))
    trackers = [os.path.basename(x) for x in trackers]
    assert len(trackers) > 0, 'no tracker results found in %s' % tracker_dir
    args.num = min(args.num, len(trackers))
    # Resolve the dataset (ground-truth) root relative to this script.
    root = os.path.realpath(os.path.join(os.path.dirname(__file__),
                                         '../testing_dataset'))
    root = os.path.join(root, args.dataset)
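    # Each branch below loads the annotations for its dataset family, attaches
    # the tracker results, and evaluates its metrics in parallel.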
    # Run one metric over all trackers in a pool of worker processes and
    # merge the per-tracker results into a single dict.
    def run_eval(eval_fn, desc):
        ret = {}
        with Pool(processes=args.num) as pool:
            for r in tqdm(pool.imap_unordered(eval_fn, trackers),
                          desc=desc, total=len(trackers), ncols=100):
                ret.update(r)
        return ret

    if 'OTB' in args.dataset:
        dataset = OTBDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_eval(benchmark.eval_success, 'eval success')
        precision_ret = run_eval(benchmark.eval_precision, 'eval precision')
        benchmark.show_result(success_ret, precision_ret,
                              show_video_level=args.show_video_level)
    elif args.dataset == 'LaSOT':
        dataset = LaSOTDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_eval(benchmark.eval_success, 'eval success')
        precision_ret = run_eval(benchmark.eval_precision, 'eval precision')
        norm_precision_ret = run_eval(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret, precision_ret, norm_precision_ret,
                              show_video_level=args.show_video_level)
    elif 'UAV' in args.dataset:
        dataset = UAVDataset(args.dataset, root)
        dataset.set_tracker(tracker_dir, trackers)
        benchmark = OPEBenchmark(dataset)
        success_ret = run_eval(benchmark.eval_success, 'eval success')
        precision_ret = run_eval(benchmark.eval_precision, 'eval precision')
        norm_precision_ret = run_eval(benchmark.eval_norm_precision,
                                      'eval norm precision')
        benchmark.show_result(success_ret, precision_ret, norm_precision_ret,
                              show_video_level=args.show_video_level)
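    # Dataset names matching none of the branches above produce no output.
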
if __name__ == '__main__':
main()