diff --git a/chapter8/camshift.py b/chapter8/camshift.py
new file mode 100644
index 0000000..9fb4a13
--- /dev/null
+++ b/chapter8/camshift.py
@@ -0,0 +1,42 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+
+# take first frame of the video
+ret, frame = cap.read()
+
+# setup initial location of window
+r,h,c,w = 300,200,400,300 # simply hardcoded the values
+track_window = (c,r,w,h)
+
+# extract the ROI and build a hue histogram for back projection
+roi = frame[r:r+h, c:c+w]
+hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
+mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))
+roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
+cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
+term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
+
+while(1):
+    ret, frame = cap.read()
+
+    if ret == True:
+        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+        dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
+
+        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
+        pts = cv2.boxPoints(ret)
+        pts = np.int0(pts)
+        img2 = cv2.polylines(frame,[pts],True, 255,2)
+
+        cv2.imshow('img2',img2)
+        k = cv2.waitKey(60) & 0xff
+        if k == 27:
+            break
+
+    else:
+        break
+
+cv2.destroyAllWindows()
+cap.release()
diff --git a/chapter8/hist.py b/chapter8/hist.py
new file mode 100644
index 0000000..efe343a
--- /dev/null
+++ b/chapter8/hist.py
@@ -0,0 +1,21 @@
+import cv2
+import numpy as np
+from matplotlib import pyplot as plt
+
+camera = cv2.VideoCapture(0)
+
+while True:
+    ret, img = camera.read()
+    color = ('b','g','r')
+    for i,col in enumerate(color):
+        histr = cv2.calcHist([img],[i],None,[256],[0,256])
+        plt.plot(histr, color = col)
+        plt.xlim([0,256])
+    plt.show()
+    #cv2.imshow("frame", img)
+    #k = cv2.waitKey(30) & 0xff
+    #if k == 27:
+    #    break
+
+camera.release()
+cv2.destroyAllWindows()
diff --git a/chapter8/kalman.py b/chapter8/kalman.py
new file mode 100644
index 0000000..b099f29
--- /dev/null
+++ b/chapter8/kalman.py
@@ -0,0 +1,56 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+
+# take first frame of the video
+ret, frame = cap.read()
+
+# setup initial location of window
+r,h,c,w = 300,200,400,300 # simply hardcoded the values
+track_window = (c,r,w,h)
+
+# extract the ROI and build a hue histogram for back projection
+roi = frame[r:r+h, c:c+w]
+hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
+mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))
+roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
+cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
+term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
+
+# constant-velocity Kalman filter: state (x, y, dx, dy), measurement (x, y)
+kalman = cv2.KalmanFilter(4, 2)
+kalman.measurementMatrix = np.array([[1,0,0,0],[0,1,0,0]], np.float32)
+kalman.transitionMatrix = np.array([[1,0,1,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]], np.float32)
+kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03
+
+while(1):
+    ret, frame = cap.read()
+
+    if ret == True:
+        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+        dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
+
+        # predict first, then correct the filter with the centre CamShift found
+        prediction = kalman.predict()
+        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
+        x, y, w, h = track_window
+        center = np.array([[np.float32(x + w/2)], [np.float32(y + h/2)]])
+        kalman.correct(center)
+
+        pts = cv2.boxPoints(ret)
+        pts = np.int0(pts)
+        img2 = cv2.polylines(frame,[pts],True, 255,2)
+        # draw the predicted centre as a filled circle
+        cv2.circle(img2, (int(prediction[0][0]), int(prediction[1][0])), 4, (0, 255, 0), -1)
+
+        cv2.imshow('img2',img2)
+        k = cv2.waitKey(60) & 0xff
+        if k == 27:
+            break
+
+    else:
+        break
+
+cv2.destroyAllWindows()
+cap.release()
diff --git a/chapter8/lk.py b/chapter8/lk.py
new file mode 100644
index 0000000..05addf2
--- /dev/null
+++ b/chapter8/lk.py
@@ -0,0 +1,60 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+
+# params for ShiTomasi corner detection
+feature_params = dict( maxCorners = 100,
+                       qualityLevel = 0.3,
+                       minDistance = 7,
+                       blockSize = 7 )
+
+# Parameters for Lucas-Kanade optical flow
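+# winSize is the search window at each pyramid level, maxLevel the highest
+# pyramid level used (0-based), and criteria stops the iterative search after
+# 10 iterations or once the window shifts by less than the 0.03 epsilon.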
+lk_params = dict( winSize = (15,15),
+                  maxLevel = 2,
+                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
+
+# Create some random colors
+color = np.random.randint(0,255,(100,3))
+
+# Take first frame and find corners in it
+ret, old_frame = cap.read()
+old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
+p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
+
+# Create a mask image for drawing purposes
+mask = np.zeros_like(old_frame)
+
+while(1):
+    ret, frame = cap.read()
+    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+
+    # calculate optical flow
+    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
+
+    # Select good points
+    good_new = p1[st==1]
+    good_old = p0[st==1]
+
+    # draw the tracks
+    for i,(new,old) in enumerate(zip(good_new,good_old)):
+        a, b = new.ravel()
+        c, d = old.ravel()
+        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
+        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
+    img = cv2.add(frame, mask)
+
+    cv2.imshow('frame', img)
+    k = cv2.waitKey(30) & 0xff
+    if k == 27:
+        break
+
+    # Now update the previous frame and previous points
+    old_gray = frame_gray.copy()
+    p0 = good_new.reshape(-1,1,2)
+
+cv2.destroyAllWindows()
+cap.release()
diff --git a/chapter8/meanshift.jpg b/chapter8/meanshift.jpg
new file mode 100644
index 0000000..701a169
Binary files /dev/null and b/chapter8/meanshift.jpg differ
diff --git a/chapter8/meanshift.py b/chapter8/meanshift.py
new file mode 100644
index 0000000..b9f8bef
--- /dev/null
+++ b/chapter8/meanshift.py
@@ -0,0 +1,48 @@
+import numpy as np
+import cv2
+
+cap = cv2.VideoCapture(0)
+# capture the first frame
+ret, frame = cap.read()
+# mark the ROI
+r,h,c,w = 10, 200, 10, 200
+# wrap in a tuple
+track_window = (c,r,w,h)
+
+# extract the ROI for tracking
+roi = frame[r:r+h, c:c+w]
+# switch the ROI to HSV
+hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
+# create a mask with upper and lower boundaries of the colors you want to track
+mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))
+# calculate the hue histogram of the ROI
+roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
+cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
+
+# Setup the termination criteria: either 10 iterations or move by at least 1 pt
+term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
+
+while(1):
+    ret, frame = cap.read()
+
+    if ret == True:
+        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+        dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
+        print(dst)
+        # apply meanshift to get the new location
+        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
+
+        # Draw it on the image
+        x,y,w,h = track_window
+        img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
+        cv2.imshow('img2', img2)
+
+        k = cv2.waitKey(60) & 0xff
+        if k == 27:
+            break
+
+    else:
+        break
+
+cv2.destroyAllWindows()
+cap.release()
diff --git a/chapter8/movie.mpg b/chapter8/movie.mpg
new file mode 100644
index 0000000..7244c91
Binary files /dev/null and b/chapter8/movie.mpg differ
diff --git a/chapter8/q.jpg b/chapter8/q.jpg
new file mode 100644
index 0000000..99e2e61
Binary files /dev/null and b/chapter8/q.jpg differ
diff --git "a/chapter8/\377.jpg" "b/chapter8/\377.jpg"
new file mode 100644
index 0000000..a1a5e33
Binary files /dev/null and "b/chapter8/\377.jpg" differ
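Note: camshift.py, kalman.py, and meanshift.py above all hard-code the initial search window (r,h,c,w). As a minimal sketch of picking that window interactively instead, assuming OpenCV 3.2 or later (which added cv2.selectROI) and the same webcam source, the setup block of any of those scripts could start like this (the window title 'roi' is arbitrary):

    import cv2

    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()

    # drag a rectangle over the object and press ENTER or SPACE to confirm;
    # selectROI returns (x, y, w, h), the same layout track_window expects
    c, r, w, h = cv2.selectROI('roi', frame, False)
    track_window = (c, r, w, h)
    roi = frame[r:r+h, c:c+w]
    cv2.destroyWindow('roi')

The HSV conversion, histogram back projection, and tracking loop then stay exactly as in the scripts above.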