Commit 483c6ab

frame similarity comparison using eigenvalue
1 parent 0f20f57 commit 483c6ab

2 files changed: 57 additions, 4 deletions

src/lib/opts.py (2 additions, 1 deletion)

@@ -120,7 +120,8 @@ def __init__(self):
         self.parser.add_argument('--output-format', type=str, default='video', help='video or text')
         self.parser.add_argument('--output-root', type=str, default='../demos', help='expected output root path')
         self.parser.add_argument('--custom_video', default=False, help='is custom video provided')
-        self.parser.add_argument('--skip_frames', default=1, help='how frequently to skip frames during detection 0: no skiping 1: 1/2skipped 2: 2/3 skipped 3: 3/4 skipped')
+        self.parser.add_argument('--skip_frames', default=1, help='how frequently to skip frames during detection. 0: no skipping, 1: 1/2 skipped, 2: 2/3 skipped, 3: 3/4 skipped')
+        self.parser.add_argument('--eigen_threshold', default=10, help='eigenvalue similarity threshold below which detection is skipped')
         # mot
         self.parser.add_argument('--data_cfg', type=str,
                                  default='../src/lib/cfg/data.json',
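Note that neither new option declares a type=, so any value supplied on the command line is parsed as a string and only the defaults are integers; this is why track.py casts with int(opt.eigen_threshold) below. A minimal standalone sketch of that argparse behaviour (the bare parser object and the example value 25 are illustrative, not part of the commit):

import argparse

# Mirrors the two options added in src/lib/opts.py: no type= is given,
# so command-line values stay strings while the defaults remain ints.
parser = argparse.ArgumentParser()
parser.add_argument('--skip_frames', default=1,
                    help='how frequently to skip frames during detection')
parser.add_argument('--eigen_threshold', default=10,
                    help='eigenvalue similarity threshold below which detection is skipped')

opt = parser.parse_args(['--eigen_threshold', '25'])
print(type(opt.eigen_threshold))   # <class 'str'> -> hence int(opt.eigen_threshold) in track.py
opt = parser.parse_args([])
print(type(opt.eigen_threshold))   # <class 'int'> when the default is used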

src/track.py (55 additions, 3 deletions)

@@ -11,6 +11,7 @@
 import motmetrics as mm
 import numpy as np
 import torch
+from PIL import Image

 from tracker.multitracker import JDETracker
 from tracking_utils import visualization as vis
@@ -75,37 +76,72 @@ def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_im
     results = []
     frame_id = 0
     prev_online_targets = []
-    skip_rate = int(opt.skip_frames) + 1
+    prev_img = None
+    eigen_threshold = int(opt.eigen_threshold)
+    num_detect = 0
+    num_skipped = 0
+    prev_area = 0
+    total_areas = []
+    largest_areas = []
     #for path, img, img0 in dataloader:
     for i, (path, img, img0) in enumerate(dataloader):
         #if i % 8 != 0:
-            #continue
+        #continue
         if frame_id % 20 == 0:
             logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
+
+

         # run tracking
         timer.tic()
+
+        if i > 0:
+            eig = compute_eigen_values_consecutive(prev_img, img0)
+        else:
+            eig = 1000
+        print('eig_', i, ': ', eig)
+
         if use_cuda:
             blob = torch.from_numpy(img).cuda().unsqueeze(0)
         else:
             blob = torch.from_numpy(img).unsqueeze(0)
-        if frame_id % skip_rate == 0:
+
+        if eig >= eigen_threshold:
             online_targets = tracker.update(blob, img0)
             prev_online_targets = online_targets
+            prev_img = img0
+            num_detect += 1
+            print('detect at ', i, ' prev_area: ', prev_area)
         else:
+            #eig = compute_eigen_values_consecutive(prev_img, img0)
             online_targets = prev_online_targets
+            num_skipped += 1
         online_tlwhs = []
         online_ids = []
         #online_scores = []
+        tot_area = 0
+        max_area = -1
         for t in online_targets:
             tlwh = t.tlwh
             tid = t.track_id
             vertical = tlwh[2] / tlwh[3] > 1.6
+
             if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                 online_tlwhs.append(tlwh)
                 online_ids.append(tid)
+                curr_area = tlwh[2] * tlwh[3]
+                tot_area += curr_area
                 #online_scores.append(t.score)
+                if curr_area > max_area:
+                    max_area = curr_area
+
+        prev_area = tot_area
+        largest_areas.append(max_area)
+        total_areas.append(tot_area)
         timer.toc()
+        print('largest_areas:', largest_areas)
+        print('total_areas:', total_areas)
+
         # save results
         results.append((frame_id + 1, online_tlwhs, online_ids))
         #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
@@ -118,10 +154,26 @@ def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_im
             cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
         frame_id += 1
     # save results
+    print('num_detect:', num_detect, 'num_skipped:', num_skipped)
     write_results(result_filename, results, data_type)
     #write_results_score(result_filename, results, data_type)
     return frame_id, timer.average_time, timer.calls

+def get_image_as_array(img):
+    image_1 = Image.fromarray(img)
+    imgGray = image_1.convert('L')
+    img_gray = np.array(imgGray)
+    img_part = img_gray
+    img_part = img_part.reshape(-1)
+    return img_part
+
+def compute_eigen_values_consecutive(image1, image2):
+    img1 = get_image_as_array(image1)
+    img2 = get_image_as_array(image2)
+    cova_1 = np.cov(img1, img2)
+    eig_1, eig_vec_1 = np.linalg.eig(cova_1)
+    eig = np.sort(eig_1)
+    return eig[0]

 def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
          save_images=False, save_videos=False, show_image=True):
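The new compute_eigen_values_consecutive helper rests on a small piece of linear algebra: np.cov of the two flattened grayscale frames is a 2x2 matrix, and when the frames are nearly identical its rows are almost linearly dependent, so the matrix is close to singular and its smallest eigenvalue is close to zero. A small eigenvalue therefore means "similar enough to reuse prev_online_targets", and the first frame is forced through the detector by setting eig = 1000. A self-contained sketch of the same check on synthetic frames (the demo frames and threshold are illustrative; np.linalg.eigvalsh replaces np.linalg.eig plus np.sort, which is equivalent here because the covariance matrix is symmetric):

import numpy as np
from PIL import Image

def get_image_as_array(img):
    # Flatten an H x W x 3 frame into a 1-D grayscale vector.
    return np.array(Image.fromarray(img).convert('L')).reshape(-1)

def compute_eigen_values_consecutive(image1, image2):
    # 2x2 covariance of the two flattened frames; the smallest eigenvalue
    # approaches 0 when the frames are (nearly) linearly dependent, i.e. similar.
    cov = np.cov(get_image_as_array(image1), get_image_as_array(image2))
    return np.linalg.eigvalsh(cov)[0]   # eigvalsh returns eigenvalues in ascending order

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    frame = rng.integers(0, 256, (240, 320, 3), dtype=np.uint8)
    # A lightly perturbed copy stands in for the "next, very similar frame".
    near_copy = np.clip(frame.astype(np.int16) + rng.integers(-2, 3, frame.shape), 0, 255).astype(np.uint8)
    # An unrelated random frame stands in for a scene change.
    different = rng.integers(0, 256, frame.shape, dtype=np.uint8)

    eigen_threshold = 10  # illustrative; matches the commit's default
    for name, other in [('near_copy', near_copy), ('different', different)]:
        eig = compute_eigen_values_consecutive(frame, other)
        action = 'skip detection, reuse previous targets' if eig < eigen_threshold else 'run the detector'
        print(name, 'smallest eigenvalue =', round(float(eig), 2), '->', action)

With the near-identical pair the smallest eigenvalue lands well under the default threshold of 10, so detection would be skipped; with the unrelated frame it is on the order of the per-pixel gray-level variance (thousands), so the detector runs.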
