Skip to content

Commit

Permalink
Simple Python example with Motion detection and tracking
Browse files Browse the repository at this point in the history
  • Loading branch information
Nuzhny007 committed Apr 7, 2022
1 parent 862cc75 commit e7681f4
Show file tree
Hide file tree
Showing 15 changed files with 963 additions and 1,069 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.9)

project(MTTracking VERSION 1.0.2)
project(MTTracking VERSION 1.1.0)

unset(CMAKE_C_FLAGS CACHE)
unset(CMAKE_CXX_FLAGS CACHE)
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@

# Last changes

* Robust tracking of small and fast objects with a background-subtraction algorithm
* Working Python example with a motion detector

* TensorRT 8 for YOLO detectors
* Robust tracking of small and fast objects with a background-subtraction algorithm

# New videos!

Expand Down
64 changes: 37 additions & 27 deletions demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,20 @@

def draw_regions(img, regions, color):
    """Draw the bounding rectangle of every detected region onto *img*.

    Args:
        img: BGR image (numpy array); drawn on in place.
        regions: iterable of detector regions; each exposes a ``brect``
            attribute with ``x``, ``y``, ``width`` and ``height`` fields
            (post-commit API — the old ``reg.x()`` accessors are gone).
        color: BGR color tuple for the rectangle outline.
    """
    for reg in regions:
        brect = reg.brect
        # cv.rectangle accepts an (x, y, w, h) rect tuple as its second argument.
        cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), color, 2)


def draw_tracks(img, tracks, fps):
    """Draw tracked objects onto *img*.

    Static (abandoned) tracks get a magenta box; robust moving tracks get a
    green box plus their trajectory polyline.

    Args:
        img: BGR image (numpy array); drawn on in place.
        tracks: iterable of tracker tracks exposing ``GetBoundingRect()``,
            ``isStatic``, ``IsRobust()`` and ``GetTrajectory()``.
        fps: video frame rate; used to size the robustness window.
    """
    for track in tracks:
        brect = track.GetBoundingRect()
        if track.isStatic:
            # Abandoned/static object: magenta box.
            cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), (255, 0, 255), 2)
        elif track.IsRobust(int(fps / 4), 0.7, (0.1, 10.)):
            # Robust moving track: green box plus its trajectory.
            cv.rectangle(img, (brect.x, brect.y, brect.width, brect.height), (0, 255, 0), 2)
            # NOTE(review): the scrape stripped indentation; trajectory drawing is
            # assumed to belong inside this elif branch — confirm against the repo.
            trajectory = track.GetTrajectory()
            for i in range(len(trajectory) - 1):
                cv.line(img, trajectory[i], trajectory[i + 1], (0, 255, 0), 1)


def main():
Expand All @@ -32,57 +37,62 @@ def main():
cam = cv.VideoCapture(video_src)

_ret, img = cam.read()
print("cam.read res = ", _ret, ", im size = ", img.shape)

fps = cam.get(cv.CAP_PROP_FPS)

configBGFG = mt.MapStringString()
configBGFG['samples'] = '20'
configBGFG["pixelNeighbor"] = "3"
configBGFG["distanceThreshold"] = "18"
configBGFG["matchingThreshold"] = "3"
configBGFG["updateFactor"] = "16"
mdetector = mt.BaseDetector(mt.BaseDetector.Detectors.VIBE, configBGFG, img)
mdetector.Init(configBGFG)
mdetector.SetMinObjectSize(int(img.shape[0] / 100), int(img.shape[0] / 100))
print(mdetector.CanGrayProcessing())
print(video_src, " fps = ", fps)

configBGFG = mt.KeyVal()
configBGFG.Add('useRotatedRect', '20')
configBGFG.Add('history', '1000')
configBGFG.Add("nmixtures", "3")
configBGFG.Add("backgroundRatio", "0.7")
configBGFG.Add("noiseSigma", "0")
print("configBGFG = ", configBGFG)
mdetector = mt.BaseDetector(mt.BaseDetector.Detectors.MOG, configBGFG, img)
print("CanGrayProcessing: ", mdetector.CanGrayProcessing())
mdetector.SetMinObjectSize((1, 1))

tracker_settings = mt.TrackerSettings()

tracker_settings.SetDistance(mt.MTracker.DistRects)
tracker_settings.kalmanType = mt.MTracker.KalmanLinear
tracker_settings.filterGoal = mt.MTracker.FilterCenter
tracker_settings.lostTrackType = mt.MTracker.TrackCSRT
tracker_settings.lostTrackType = mt.MTracker.TrackNone
tracker_settings.matchType = mt.MTracker.MatchHungrian
tracker_settings.useAcceleration = False
tracker_settings.dt = 0.2
tracker_settings.accelNoiseMag = 0.2
tracker_settings.dt = 0.5
tracker_settings.accelNoiseMag = 0.1
tracker_settings.distThres = 0.95
tracker_settings.minAreaRadiusPix = -1.
tracker_settings.minAreaRadiusPix = img.shape[0] / 5.
tracker_settings.minAreaRadiusK = 0.8
tracker_settings.useAbandonedDetection = True
tracker_settings.minStaticTime = 3
tracker_settings.maxStaticTime = 3 * tracker_settings.minStaticTime
tracker_settings.maximumAllowedSkippedFrames = int(tracker_settings.minStaticTime * fps)
tracker_settings.maxTraceLength = 2 * tracker_settings.maximumAllowedSkippedFrames
tracker_settings.useAbandonedDetection = False
tracker_settings.maximumAllowedSkippedFrames = int(2 * fps)
tracker_settings.maxTraceLength = int(2 * fps)

mtracker = mt.MTracker(tracker_settings)

while True:
_ret, img = cam.read()
if _ret:
print("cam.read res = ", _ret, ", im size = ", img.shape, ", fps = ", fps)
else:
break

mdetector.Detect(img)
regions = mdetector.GetDetects()
print("mdetector.Detect:", len(regions))

mtracker.Update(regions, img, fps)
tracks = mtracker.GetTracks()
print("detects:", len(regions), ", tracks:", len(tracks))
print("mtracker.Update:", len(tracks))

vis = img.copy()
# draw_regions(vis, rects, (255, 0, 255))
# draw_regions(vis, regions, (255, 0, 255))
draw_tracks(vis, tracks, fps)
cv.imshow('detect', vis)

if cv.waitKey(1) == 27:
if cv.waitKey(int(1000 / fps)) == 27:
break

print('Done')
Expand Down
Loading

0 comments on commit e7681f4

Please sign in to comment.