From edb4c2eed205abe92afcc9ff69804ffe449e970c Mon Sep 17 00:00:00 2001
From: Ivan Itzcovich
Date: Mon, 13 Aug 2018 17:22:46 -0300
Subject: [PATCH] Add saving file support

---
 video_client.py | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/video_client.py b/video_client.py
index 1b87f32..20e62de 100644
--- a/video_client.py
+++ b/video_client.py
@@ -5,8 +5,8 @@
 from detector import FaceDetector
 from utils import annotate_image
 
-YOLO_MODELS_DIR = "/Users/ivanitz/Projects/yolo-face-artifacts/run6/models/"
-CORRECTOR_MODELS_DIR = "/Users/ivanitz/Projects/fine-tuned-face/models/"
+YOLO_MODELS_DIR = "/home/ivanitz/yolo-face/models/"
+CORRECTOR_MODELS_DIR = "/home/ivanitz/face-correction/models/"
 
 
 def run(feed):
@@ -18,6 +18,19 @@ def run(feed):
     else:
         cap = cv2.VideoCapture(feed)
 
+    # Get current width of frame
+    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
+    # Get current height of frame
+    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
+    fps = cap.get(cv2.CAP_PROP_FPS)  # float
+
+
+    # Define the codec and create VideoWriter object
+    # fourcc = cv2.Video.CV_FOURCC(*'X264')
+    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
+    out = cv2.VideoWriter("output.avi", fourcc, fps, (int(width), int(height)))
+
+
     now = time.time()
     while(cap.isOpened()):
         # Capture frame-by-frame
@@ -28,10 +41,12 @@ def run(feed):
         print("FPS: {:0.2f}".format(1 / (time.time() - now)))
         ann_frame = annotate_image(frame, bboxes)
+        out.write(ann_frame)
+
 
         # Display the resulting frame
-        cv2.imshow('frame', ann_frame)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
-            break
+        # cv2.imshow('frame', ann_frame)
+        # if cv2.waitKey(1) & 0xFF == ord('q'):
+        #     break
 
     # When everything done, release the capture
     cap.release()