# edge-detector.py
# Forked from spmallick/learnopencv
import cv2
import depthai as dai
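
# This script builds a DepthAI pipeline that runs the on-device EdgeDetector
# node on both mono cameras and the color camera, streams the resulting edge
# maps to the host over XLink, and lets you swap the Sobel kernels at runtime
# with the '1' and '2' keys (Esc quits).
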
# Create pipeline
pipeline = dai.Pipeline()
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
edgeDetectorLeft = pipeline.create(dai.node.EdgeDetector)
edgeDetectorRight = pipeline.create(dai.node.EdgeDetector)
edgeDetectorRgb = pipeline.create(dai.node.EdgeDetector)
xoutEdgeLeft = pipeline.create(dai.node.XLinkOut)
xoutEdgeRight = pipeline.create(dai.node.XLinkOut)
xoutEdgeRgb = pipeline.create(dai.node.XLinkOut)
xinEdgeCfg = pipeline.create(dai.node.XLinkIn)
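
# The XLinkOut nodes stream the edge frames from the device to the host, while
# the XLinkIn node carries EdgeDetectorConfig messages from the host back to
# the device so the kernels can be changed while the pipeline is running.
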
edgeLeftStr = "edge left"
edgeRightStr = "edge right"
edgeRgbStr = "edge rgb"
edgeCfgStr = "edge cfg"
xoutEdgeLeft.setStreamName(edgeLeftStr)
xoutEdgeRight.setStreamName(edgeRightStr)
xoutEdgeRgb.setStreamName(edgeRgbStr)
xinEdgeCfg.setStreamName(edgeCfgStr)
# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight())
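
# The EdgeDetector output is a single-channel (grayscale) frame, so the 1080p
# color stream needs an output buffer of at least width * height bytes; the
# smaller 400p mono streams are left at the node's default output frame size.
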
# Linking
monoLeft.out.link(edgeDetectorLeft.inputImage)
monoRight.out.link(edgeDetectorRight.inputImage)
camRgb.video.link(edgeDetectorRgb.inputImage)
edgeDetectorLeft.outputImage.link(xoutEdgeLeft.input)
edgeDetectorRight.outputImage.link(xoutEdgeRight.input)
edgeDetectorRgb.outputImage.link(xoutEdgeRgb.input)
xinEdgeCfg.out.link(edgeDetectorLeft.inputConfig)
xinEdgeCfg.out.link(edgeDetectorRight.inputConfig)
xinEdgeCfg.out.link(edgeDetectorRgb.inputConfig)
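
# A single config stream feeds all three edge detectors, so one
# EdgeDetectorConfig message from the host updates the kernels on the left,
# right, and RGB detectors at the same time.
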
# Connect to device and start pipeline
with dai.Device(pipeline) as device:
    # Output/input queues
    edgeLeftQueue = device.getOutputQueue(edgeLeftStr, 8, False)
    edgeRightQueue = device.getOutputQueue(edgeRightStr, 8, False)
    edgeRgbQueue = device.getOutputQueue(edgeRgbStr, 8, False)
    edgeCfgQueue = device.getInputQueue(edgeCfgStr)

    print("Switch between Sobel filter kernels using keys '1' and '2'")
    while True:
        edgeLeft = edgeLeftQueue.get()
        edgeRight = edgeRightQueue.get()
        edgeRgb = edgeRgbQueue.get()

        edgeLeftFrame = edgeLeft.getFrame()
        edgeRightFrame = edgeRight.getFrame()
        edgeRgbFrame = edgeRgb.getFrame()

        # Show the edge frames from the left, right, and RGB cameras
        cv2.imshow(edgeLeftStr, edgeLeftFrame)
        cv2.imshow(edgeRightStr, edgeRightFrame)
        cv2.imshow(edgeRgbStr, edgeRgbFrame)

        key = cv2.waitKey(1)
        if key == 27:  # Esc quits
            break
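
        # Pressing '1' or '2' sends a new EdgeDetectorConfig through the
        # "edge cfg" XLinkIn stream, reconfiguring the Sobel kernels on the
        # device without rebuilding the pipeline.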
        if key == ord('1'):
            print("Switching to standard Sobel filter kernels.")
            cfg = dai.EdgeDetectorConfig()
            # Classic 3x3 Sobel kernels
            sobelHorizontalKernel = [[1, 0, -1], [2, 0, -2], [1, 0, -1]]
            sobelVerticalKernel = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
            cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
            edgeCfgQueue.send(cfg)
        if key == ord('2'):
            print("Switching to Scharr-style filter kernels.")
            cfg = dai.EdgeDetectorConfig()
            # 3x3 Scharr kernels, which weight the centre row/column more heavily
            sobelHorizontalKernel = [[3, 0, -3], [10, 0, -10], [3, 0, -3]]
            sobelVerticalKernel = [[3, 10, 3], [0, 0, 0], [-3, -10, -3]]
            cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
            edgeCfgQueue.send(cfg)
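
# A minimal way to run this script, assuming an OAK device is attached and the
# depthai and opencv-python packages are installed:
#   python3 -m pip install depthai opencv-python
#   python3 edge-detector.py
# Press Esc in any preview window to exit.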