From 6120218f9b5338fb93a07adb92813826a9b9ccbf Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 4 Feb 2021 03:37:11 +0200 Subject: [PATCH 01/66] Add UVC node bindings --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 6 ++++++ src/pipeline/PipelineBindings.cpp | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 6be0cab6a..9d3aa5c67 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 6be0cab6a9c8c68d71e2a0ec800b0e09e5c7cc06 +Subproject commit 9d3aa5c67c12d507f3497213e4fbea00f9f7be33 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index a1177bfde..2ac8652c6 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -12,6 +12,7 @@ #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" +#include "depthai/pipeline/node/UVC.hpp" // Libraries #include "hedley/hedley.h" @@ -299,6 +300,11 @@ void NodeBindings::bind(pybind11::module& m){ .def("setRate", &SystemLogger::setRate) ; + // UVC node + py::class_>(m, "UVC") + .def_readonly("input", &UVC::input) + ; + //////////////////////////////////// // Node properties bindings diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 306f30548..eebfed5ff 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -14,6 +14,7 @@ #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" +#include "depthai/pipeline/node/UVC.hpp" // depthai-shared #include "depthai-shared/pb/properties/GlobalProperties.hpp" @@ -65,6 +66,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createStereoDepth", &Pipeline::create) .def("createMobileNetDetectionNetwork", &Pipeline::create) .def("createYoloDetectionNetwork", &Pipeline::create) + .def("createUVC", 
&Pipeline::create) ; From fe7966d6c9d38c1416f98c07ce690f22d926be65 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 4 Feb 2021 03:57:55 +0200 Subject: [PATCH 02/66] Add UVC example, streaming NV12 from ColorCamera `video` --- examples/19_uvc_video.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100755 examples/19_uvc_video.py diff --git a/examples/19_uvc_video.py b/examples/19_uvc_video.py new file mode 100755 index 000000000..5a320aa24 --- /dev/null +++ b/examples/19_uvc_video.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +import depthai as dai +import time + +# Start defining a pipeline +pipeline = dai.Pipeline() + +# Define a source - color camera +cam_rgb = pipeline.createColorCamera() +cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) +cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) +cam_rgb.setInterleaved(False) + +# Create an UVC (USB Video Class) output node +uvc = pipeline.createUVC() +cam_rgb.video.link(uvc.input) + +# Pipeline defined, now the device is connected to +with dai.Device(pipeline) as device: + # Start pipeline + device.startPipeline() + + print("\nDevice started, please keep this process running") + print("and open an UVC viewer. 
Example on Linux:") + print(" guvcview -d /dev/video0") + print("\nTo close: Ctrl+C") + + while True: + try: + time.sleep(0.1) + except KeyboardInterrupt: + break From 8b2d21a31afeb735f09e957431b44b3f7dcc42ce Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 7 Jan 2021 06:07:42 +0200 Subject: [PATCH 03/66] ColorCamera add bindings: `isp`/`raw` outputs --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 9d3aa5c67..3088124a6 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 9d3aa5c67c12d507f3497213e4fbea00f9f7be33 +Subproject commit 3088124a6af64d72d490f60d97e27f590b795784 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 2ac8652c6..b031c43fe 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -94,6 +94,8 @@ void NodeBindings::bind(pybind11::module& m){ .def_readonly("video", &ColorCamera::video) .def_readonly("preview", &ColorCamera::preview) .def_readonly("still", &ColorCamera::still) + .def_readonly("isp", &ColorCamera::isp) + .def_readonly("raw", &ColorCamera::raw) .def("setCamId", [](ColorCamera& c, int64_t id) { // Issue an deprecation warning PyErr_WarnEx(PyExc_DeprecationWarning, "setCamId() is deprecated, use setBoardSocket() instead.", 1); From 320712e14c99f6c86439dbf6f4b9abb0f4826a65 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 12 Feb 2021 01:06:02 +0200 Subject: [PATCH 04/66] Update core: ColorCamera setIspScale --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 6e4906911..97249f8e5 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 6e4906911a94096cd393daaff496eea06c1af24b +Subproject commit 97249f8e5301a6f4532d5d3c9d565a4ebfb0d53e diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 
36de61c5a..123862923 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -151,6 +151,8 @@ void NodeBindings::bind(pybind11::module& m){ .def("getWaitForConfigInput", &ColorCamera::getWaitForConfigInput) .def("setPreviewKeepAspectRatio", &ColorCamera::setPreviewKeepAspectRatio) .def("getPreviewKeepAspectRatio", &ColorCamera::getPreviewKeepAspectRatio) + .def("setIspScale", &ColorCamera::setIspScale) + .def("setIspScaleFull", &ColorCamera::setIspScaleFull) ; // NeuralNetwork node From e70becfe1c1908c6148f842006b9986ebab39cab Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 12 Feb 2021 01:07:00 +0200 Subject: [PATCH 05/66] UVC example: enable 4K, downscaled to 1080p, fixed focus --- examples/19_uvc_video.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/19_uvc_video.py b/examples/19_uvc_video.py index 5a320aa24..86adc461c 100755 --- a/examples/19_uvc_video.py +++ b/examples/19_uvc_video.py @@ -3,18 +3,26 @@ import depthai as dai import time +enable_4k = True + # Start defining a pipeline pipeline = dai.Pipeline() # Define a source - color camera cam_rgb = pipeline.createColorCamera() cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) -cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam_rgb.setInterleaved(False) +cam_rgb.initialControl.setManualFocus(130) + +if enable_4k: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) + cam_rgb.setIspScale(1, 2) +else: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) # Create an UVC (USB Video Class) output node uvc = pipeline.createUVC() -cam_rgb.video.link(uvc.input) +cam_rgb.isp.link(uvc.input) # Pipeline defined, now the device is connected to with dai.Device(pipeline) as device: From 0c2f38b8117e6907550b7eab1b420bf3418338ea Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 2 May 2021 23:15:39 +0300 Subject: [PATCH 06/66] Update UVC FW, update example: input 
changed from ColorCamera `isp` (YUV420p) to `video` (NV12) - as macOS doesn't work with YUV420p --- depthai-core | 2 +- examples/19_uvc_video.py | 4 ++-- src/pipeline/NodeBindings.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/depthai-core b/depthai-core index f3f4f2139..4eb7f7683 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit f3f4f2139cf56fc6fb8cc1fe52cb923d0f50cfd2 +Subproject commit 4eb7f76833af60ae9ec264314041b4ab41bbe5f6 diff --git a/examples/19_uvc_video.py b/examples/19_uvc_video.py index 86adc461c..eb8be3a9a 100755 --- a/examples/19_uvc_video.py +++ b/examples/19_uvc_video.py @@ -20,9 +20,9 @@ else: cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -# Create an UVC (USB Video Class) output node +# Create an UVC (USB Video Class) output node. It needs 1920x1080, NV12 input uvc = pipeline.createUVC() -cam_rgb.isp.link(uvc.input) +cam_rgb.video.link(uvc.input) # Pipeline defined, now the device is connected to with dai.Device(pipeline) as device: diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 56aff6108..aaf135574 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -451,7 +451,7 @@ void NodeBindings::bind(pybind11::module& m){ // UVC node py::class_>(m, "UVC") - .def_readonly("input", &UVC::input) + .def_readonly("input", &UVC::input, DOC(dai, node, UVC, input)) ; //////////////////////////////////// From 5494d2407ab2c32c2566770a94a8feb814d6efd9 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 20 Sep 2021 22:54:33 +0300 Subject: [PATCH 07/66] Rename 19_uvc_video.py -> rgb_uvc.py --- examples/{19_uvc_video.py => rgb_uvc.py} | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) rename examples/{19_uvc_video.py => rgb_uvc.py} (90%) diff --git a/examples/19_uvc_video.py b/examples/rgb_uvc.py similarity index 90% rename from examples/19_uvc_video.py rename to examples/rgb_uvc.py index eb8be3a9a..d90fd6938 100755 
--- a/examples/19_uvc_video.py +++ b/examples/rgb_uvc.py @@ -3,7 +3,7 @@ import depthai as dai import time -enable_4k = True +enable_4k = True # Will downscale 4K -> 1080p # Start defining a pipeline pipeline = dai.Pipeline() @@ -26,14 +26,12 @@ # Pipeline defined, now the device is connected to with dai.Device(pipeline) as device: - # Start pipeline - device.startPipeline() - print("\nDevice started, please keep this process running") print("and open an UVC viewer. Example on Linux:") print(" guvcview -d /dev/video0") print("\nTo close: Ctrl+C") + # Doing nothing here, just keeping the host feeding the watchdog while True: try: time.sleep(0.1) From e8bbeabeed5b6b6ebdfd6f2a71c1b2348b0af44d Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 20 Sep 2021 22:57:33 +0300 Subject: [PATCH 08/66] Disable manual focus for now --- examples/rgb_uvc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/rgb_uvc.py b/examples/rgb_uvc.py index d90fd6938..34c378099 100755 --- a/examples/rgb_uvc.py +++ b/examples/rgb_uvc.py @@ -12,7 +12,7 @@ cam_rgb = pipeline.createColorCamera() cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) cam_rgb.setInterleaved(False) -cam_rgb.initialControl.setManualFocus(130) +#cam_rgb.initialControl.setManualFocus(130) if enable_4k: cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) From f513b0f27b4f07a9940cd9dfe65d7fd6deeaf29d Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 31 Dec 2021 16:33:42 +0200 Subject: [PATCH 09/66] Update core/XLink: `fix_run_with_uvc` --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index ebf00a503..9ac4971dc 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit ebf00a5032bf823e2be63adf499293c24ab545d5 +Subproject commit 9ac4971dc8779efa1978f1841bab670dc2eb4b9e From c188a67723295e863e9f08d6ca2179c1a39df551 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 14 Mar 2022 02:45:00 +0200 
Subject: [PATCH 10/66] rgb_uvc.py: add flashing capability (for devices like IoT or Pro). Usage: python3 examples/rgb_uvc.py -fb -f ^ Will flash the bootloader, then the application (app firmware + UVC pipeline). Then the device should start automatically in UVC mode upon power-on. It's still possible to use the depthai library --- examples/rgb_uvc.py | 73 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 16 deletions(-) diff --git a/examples/rgb_uvc.py b/examples/rgb_uvc.py index 34c378099..00f7d76aa 100755 --- a/examples/rgb_uvc.py +++ b/examples/rgb_uvc.py @@ -2,30 +2,71 @@ import depthai as dai import time +import argparse enable_4k = True # Will downscale 4K -> 1080p -# Start defining a pipeline -pipeline = dai.Pipeline() +parser = argparse.ArgumentParser() +parser.add_argument('-fb', '--flash-bootloader', default=False, action="store_true") +parser.add_argument('-f', '--flash-app', default=False, action="store_true") +args = parser.parse_args() -# Define a source - color camera -cam_rgb = pipeline.createColorCamera() -cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) -cam_rgb.setInterleaved(False) -#cam_rgb.initialControl.setManualFocus(130) +def getPipeline(): + # Start defining a pipeline + pipeline = dai.Pipeline() -if enable_4k: - cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) - cam_rgb.setIspScale(1, 2) -else: - cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) + # Define a source - color camera + cam_rgb = pipeline.createColorCamera() + cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) + cam_rgb.setInterleaved(False) + #cam_rgb.initialControl.setManualFocus(130) -# Create an UVC (USB Video Class) output node. 
It needs 1920x1080, NV12 input -uvc = pipeline.createUVC() -cam_rgb.video.link(uvc.input) + if enable_4k: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) + cam_rgb.setIspScale(1, 2) + else: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) + + # Create an UVC (USB Video Class) output node. It needs 1920x1080, NV12 input + uvc = pipeline.createUVC() + cam_rgb.video.link(uvc.input) + + return pipeline + +# Workaround for a bug with the timeout-enabled bootloader +progressCalled = False +# TODO move this under flash(), will need to handle `progressCalled` differently +def progress(p): + global progressCalled + progressCalled = True + print(f'Flashing progress: {p*100:.1f}%') + +# Will flash the bootloader if no pipeline is provided as argument +def flash(pipeline=None): + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + bootloader = dai.DeviceBootloader(bl, True) + + startTime = time.monotonic() + if pipeline is None: + print("Flashing bootloader...") + bootloader.flashBootloader(progress) + else: + print("Flashing application pipeline...") + bootloader.flash(progress, pipeline) + + if not progressCalled: + raise RuntimeError('Flashing failed, please try again') + elapsedTime = round(time.monotonic() - startTime, 2) + print("Done in", elapsedTime, "seconds") + +if args.flash_bootloader or args.flash_app: + if args.flash_bootloader: flash() + if args.flash_app: flash(getPipeline()) + print("Flashing successful. Please power-cycle the device") + quit() # Pipeline defined, now the device is connected to -with dai.Device(pipeline) as device: +with dai.Device(getPipeline()) as device: print("\nDevice started, please keep this process running") print("and open an UVC viewer. 
Example on Linux:") print(" guvcview -d /dev/video0") From 50ae49aedfc8050be10339e2c4bda54e6270696a Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Wed, 13 Jul 2022 00:07:32 +0300 Subject: [PATCH 11/66] Update UVC node to latest from custom_uvc_uac_scripting --- src/pipeline/NodeBindings.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index cbcaf65f7..c0cf4c321 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -1318,6 +1318,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){ // UVC node uvc .def_readonly("input", &UVC::input, DOC(dai, node, UVC, input)) + .def("setGpiosOnInit", &UVC::setGpiosOnInit, py::arg("list"), DOC(dai, node, UVC, setGpiosOnInit)) + .def("setGpiosOnStreamOn", &UVC::setGpiosOnStreamOn, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOn)) + .def("setGpiosOnStreamOff", &UVC::setGpiosOnStreamOff, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOff)) ; From 8fad35045cadc034332f683a36781d010c1f7157 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Tue, 9 Aug 2022 22:41:48 +0300 Subject: [PATCH 12/66] FW: fix UVC with USB2 --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index e1b68a3a2..0c71697cc 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit e1b68a3a2020b875d5a875c80321fdccb87c042a +Subproject commit 0c71697cc8ea7d65c39227b0c46655bdbea44076 From 4ca3aa8183078f77f06cd8ab4687401dae868439 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 11 Aug 2022 03:49:31 +0300 Subject: [PATCH 13/66] FW: fix auto-exposure with large frame sizes (over 5000 W/H) --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 0c71697cc..42b329d43 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 0c71697cc8ea7d65c39227b0c46655bdbea44076 +Subproject commit 
42b329d4352fc2c82f31bd181ede1c0678a39f3d From e6a4209ba1880a3c0f162a9ce2be5f34ba7579f6 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Wed, 7 Sep 2022 18:11:56 +0300 Subject: [PATCH 14/66] FW: IMX582 48MP config: 6.44 -> 10fps --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 42b329d43..9e5ca392f 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 42b329d4352fc2c82f31bd181ede1c0678a39f3d +Subproject commit 9e5ca392f54611f72cb2b1528901f27f48be6551 From 2f674695307bcca8042c46945f2b0a9b00f10e94 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 13 Apr 2023 20:15:34 +0300 Subject: [PATCH 15/66] FW: fix running 4 cameras with Ethernet, 4th cam enabled didn't stream due to device memory allocation --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 125feb8c2..c4af66caa 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 125feb8c2e16ee4bf71b7873a7b990f1c5f17b18 +Subproject commit c4af66caaf544ba1b409bee9ead30cd8ba3deda4 From a652f428834fd2c287f08cfad06af0caa4a612ef Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 18 Apr 2023 03:23:22 +0200 Subject: [PATCH 16/66] [FW] OAK-D-LR R1 preparation, OAK-D-SR camera enumeration fix --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index c4af66caa..53198b310 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit c4af66caaf544ba1b409bee9ead30cd8ba3deda4 +Subproject commit 53198b31067c97448051c64a39dd49cf5bc35647 From f03c4e5c09f2b441155ccfe3fdf1ad3ca8b39cee Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Wed, 19 Apr 2023 00:25:29 +0200 Subject: [PATCH 17/66] [FW / BL] Updated both FW & BL for OAK-D-LR R1. ETH fixes and moved to BNO086 IMU. 
--- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 53198b310..c03bfeab6 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 53198b31067c97448051c64a39dd49cf5bc35647 +Subproject commit c03bfeab670ab4e325fec5ab8c23fb5804759a90 From 92f4425f0ecdcb48703a147510f1489b773cb150 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 20 Apr 2023 01:59:32 +0200 Subject: [PATCH 18/66] [BL] Updated to 0.0.25 release --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index c03bfeab6..8a1316f34 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit c03bfeab670ab4e325fec5ab8c23fb5804759a90 +Subproject commit 8a1316f343fd841fe5ad8e1618d17b2a31ad33b3 From 4297c410496fc62de63098caf4b879c279f88313 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 23 Apr 2023 15:26:05 +0200 Subject: [PATCH 19/66] Deprecated misleading board socket aliases --- depthai-core | 2 +- docs/source/components/nodes/color_camera.rst | 4 +- docs/source/components/nodes/mono_camera.rst | 4 +- docs/source/components/nodes/stereo_depth.rst | 215 ++++++++++++++++++ .../samples/host_side/device_information.rst | 2 +- examples/AprilTag/apriltag.py | 2 +- examples/AprilTag/apriltag_rgb.py | 2 +- examples/ColorCamera/rgb_isp_scale.py | 2 +- examples/ColorCamera/rgb_undistort.py | 2 +- examples/ColorCamera/rgb_video.py | 2 +- examples/EdgeDetector/edge_detector.py | 6 +- examples/FeatureTracker/feature_detector.py | 4 +- examples/FeatureTracker/feature_tracker.py | 4 +- examples/ImageManip/image_manip_rotate.py | 2 +- examples/MobileNet/mono_mobilenet.py | 2 +- examples/MonoCamera/mono_camera_control.py | 4 +- .../MonoCamera/mono_full_resolution_saver.py | 2 +- examples/MonoCamera/mono_preview.py | 4 +- .../MonoCamera/mono_preview_alternate_pro.py | 4 +- examples/NeuralNetwork/concat_multi_input.py | 4 +- .../ObjectTracker/spatial_object_tracker.py | 6 +- 
.../Script/script_change_pipeline_flow.py | 2 +- .../spatial_calculator_multi_roi.py | 4 +- .../spatial_location_calculator.py | 4 +- .../SpatialDetection/spatial_mobilenet.py | 6 +- .../spatial_mobilenet_mono.py | 4 +- .../SpatialDetection/spatial_tiny_yolo.py | 6 +- examples/StereoDepth/depth_colormap.py | 4 +- examples/StereoDepth/depth_crop_control.py | 4 +- examples/StereoDepth/depth_post_processing.py | 4 +- examples/StereoDepth/depth_preview.py | 4 +- examples/StereoDepth/depth_preview_lr.py | 4 +- examples/StereoDepth/rgb_depth_aligned.py | 10 +- .../rgb_depth_confidence_aligned.py | 10 +- .../StereoDepth/stereo_depth_from_host.py | 4 +- examples/StereoDepth/stereo_depth_video.py | 16 +- .../disparity_colormap_encoding.py | 4 +- examples/VideoEncoder/disparity_encoding.py | 4 +- examples/VideoEncoder/encoding_max_limit.py | 6 +- examples/VideoEncoder/rgb_encoding.py | 2 +- .../VideoEncoder/rgb_full_resolution_saver.py | 4 +- examples/VideoEncoder/rgb_mono_encoding.py | 6 +- examples/calibration/calibration_load.py | 4 +- examples/calibration/calibration_reader.py | 30 +-- examples/host_side/opencv_support.py | 2 +- examples/host_side/queue_add_callback.py | 4 +- examples/mixed/frame_sync.py | 4 +- examples/mixed/mono_depth_mobilenetssd.py | 4 +- examples/mixed/multiple_devices.py | 6 +- examples/mixed/report_camera_settings.py | 2 +- examples/mixed/rgb_encoding_mobilenet.py | 2 +- examples/mixed/rgb_encoding_mono_mobilenet.py | 4 +- .../rgb_encoding_mono_mobilenet_depth.py | 6 +- examples/mixed/rotated_spatial_detections.py | 6 +- src/CalibrationHandlerBindings.cpp | 2 +- utilities/cam_test.py | 8 +- 56 files changed, 345 insertions(+), 130 deletions(-) diff --git a/depthai-core b/depthai-core index 8a1316f34..4c67a5197 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 8a1316f343fd841fe5ad8e1618d17b2a31ad33b3 +Subproject commit 4c67a5197f605aeeccfbdc97bc730ea836028bb4 diff --git a/docs/source/components/nodes/color_camera.rst 
b/docs/source/components/nodes/color_camera.rst index 6323af090..d9718f8f7 100644 --- a/docs/source/components/nodes/color_camera.rst +++ b/docs/source/components/nodes/color_camera.rst @@ -87,7 +87,7 @@ Usage pipeline = dai.Pipeline() cam = pipeline.create(dai.node.ColorCamera) cam.setPreviewSize(300, 300) - cam.setBoardSocket(dai.CameraBoardSocket.RGB) + cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam.setInterleaved(False) cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB) @@ -97,7 +97,7 @@ Usage dai::Pipeline pipeline; auto cam = pipeline.create(); cam->setPreviewSize(300, 300); - cam->setBoardSocket(dai::CameraBoardSocket::RGB); + cam->setBoardSocket(dai::CameraBoardSocket::CAM_A); cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); cam->setInterleaved(false); cam->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB); diff --git a/docs/source/components/nodes/mono_camera.rst b/docs/source/components/nodes/mono_camera.rst index 3995de685..e5b278337 100644 --- a/docs/source/components/nodes/mono_camera.rst +++ b/docs/source/components/nodes/mono_camera.rst @@ -48,14 +48,14 @@ Usage pipeline = dai.Pipeline() mono = pipeline.create(dai.node.MonoCamera) - mono.setBoardSocket(dai.CameraBoardSocket.RIGHT) + mono.setCamera("right") mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) .. 
code-tab:: c++ dai::Pipeline pipeline; auto mono = pipeline.create(); - mono->setBoardSocket(dai::CameraBoardSocket::RIGHT); + mono->setCamera("right"); mono->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); Examples of functionality diff --git a/docs/source/components/nodes/stereo_depth.rst b/docs/source/components/nodes/stereo_depth.rst index 49a66c975..cd3d340c5 100644 --- a/docs/source/components/nodes/stereo_depth.rst +++ b/docs/source/components/nodes/stereo_depth.rst @@ -286,6 +286,221 @@ as: For the final disparity map, a filtering is applied based on the confidence threshold value: the pixels that have their confidence score larger than the threshold get invalidated, i.e. their disparity value is set to zero. You can set the confidence threshold with :code:`stereo.initialConfig.setConfidenceThreshold()`. +Calculate depth using disparity map +=================================== + +Disparity and depth are inversely related. As disparity decreases, depth increases exponentially depending on baseline and focal length. Meaning, if the disparity value is close to zero, then a small change in disparity generates a large change in depth. Similarly, if the disparity value is big, then large changes in disparity do not lead to a large change in depth. + +By considering this fact, depth can be calculated using this formula: + +.. code-block:: python + + depth = focal_length_in_pixels * baseline / disparity_in_pixels + +Where baseline is the distance between two mono cameras. Note the unit used for baseline and depth is the same. + +To get focal length in pixels, you can :ref:`read camera calibration `, as focal length in pixels is +written in camera intrinsics (``intrinsics[0][0]``): + +.. 
code-block:: python + + import depthai as dai + + with dai.Device() as device: + calibData = device.readCalibration() + intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C) + print('Right mono camera focal length in pixels:', intrinsics[0][0]) + +Here's theoretical calculation of the focal length in pixels: + +.. code-block:: python + + focal_length_in_pixels = image_width_in_pixels * 0.5 / tan(HFOV * 0.5 * PI/180) + + # With 400P mono camera resolution where HFOV=71.9 degrees + focal_length_in_pixels = 640 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 441.25 + + # With 800P mono camera resolution where HFOV=71.9 degrees + focal_length_in_pixels = 1280 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 882.5 + +Examples for calculating the depth value, using the OAK-D (7.5cm baseline): + +.. code-block:: python + + # For OAK-D @ 400P mono cameras and disparity of eg. 50 pixels + depth = 441.25 * 7.5 / 50 = 66.19 # cm + + # For OAK-D @ 800P mono cameras and disparity of eg. 10 pixels + depth = 882.5 * 7.5 / 10 = 661.88 # cm + +Note the value of disparity depth data is stored in :code:`uint16`, where 0 is a special value, meaning that distance is unknown. + +Min stereo depth distance +========================= + +If the depth results for close-in objects look weird, this is likely because they are below the minimum depth-perception distance of the device. + +To calculate this minimum distance, use the :ref:`depth formula ` and choose the maximum value for disparity_in_pixels parameter (keep in mind it is inveresly related, so maximum value will yield the smallest result). + +For example OAK-D has a baseline of **7.5cm**, focal_length_in_pixels of **882.5 pixels** and the default maximum value for disparity_in_pixels is **95**. By using the :ref:`depth formula ` we get: + +.. code-block:: python + + min_distance = focal_length_in_pixels * baseline / disparity_in_pixels = 882.5 * 7.5cm / 95 = 69.67cm + +or roughly 70cm. 
+ +However this distance can be cut in 1/2 (to around 35cm for the OAK-D) with the following options: + +1. Changing the resolution to 640x400, instead of the standard 1280x800. + +2. Enabling Extended Disparity. + +Extended Disparity mode increases the levels of disparity to 191 from the standard 96 pixels, thereby 1/2-ing the minimum depth. It does so by computing the 96-pixel disparities on the original 1280x720 and on the downscaled 640x360 image, which are then merged to a 191-level disparity. For more information see the Extended Disparity tab in :ref:`this table `. + +Using the previous OAK-D example, disparity_in_pixels now becomes **190** and the minimum distance is: + +.. code-block:: python + + min_distance = focal_length_in_pixels * baseline / disparity_in_pixels = 882.5 * 7.5cm / 190 = 34.84cm + +or roughly 35cm. + +.. note:: + + Applying both of those options is possible, which would set the minimum depth to 1/4 of the standard settings, but at such short distances the minimum depth is limited by focal length, which is 19.6cm, since OAK-D mono cameras have fixed focus distance: 19.6cm - infinity. + +See `these examples `__ for how to enable Extended Disparity. + +Disparity shift to lower min depth perception +--------------------------------------------- + +Another option to perceive closer depth range is to use disparity shift. Disparity shift will shift the starting point +of the disparity search, which will significantly decrease max depth (MazZ) perception, but it will also decrease min depth (MinZ) perception. +Disparity shift can be combined with extended/subpixel/LR-check modes. + +.. image:: https://user-images.githubusercontent.com/18037362/189375017-2fa137d2-ad6b-46de-8899-6304bbc6c9d7.png + +**Left graph** shows min and max disparity and depth for OAK-D (7.5cm baseline, 800P resolution, ~70° HFOV) by default (disparity shift=0). See :ref:`Calculate depth using disparity map`. 
+Since hardware (stereo block) has a fixed 95 pixel disparity search, DepthAI will search from 0 pixels (depth=INF) to 95 pixels (depth=71cm). + +**Right graph** shows the same, but at disparity shift set to 30 pixels. This means that disparity search will be from 30 pixels (depth=2.2m) to 125 pixels (depth=50cm). +This also means that depth will be very accurate at the short range (**theoretically** below 5mm depth error). + +**Limitations**: + +- Because of the inverse relationship between disparity and depth, MaxZ will decrease much faster than MinZ as the disparity shift is increased. Therefore, it is **advised not to use a larger than necessary disparity shift**. +- Tradeoff in reducing the MinZ this way is that objects at **distances farther away than MaxZ will not be seen**. +- Because of the point above, **we only recommend using disparity shift when MaxZ is known**, such as having a depth camera mounted above a table pointing down at the table surface. +- Output disparity map is not expanded, only the depth map. So if disparity shift is set to 50, and disparity value obtained is 90, the real disparity is 140. + +**Compared to Extended disparity**, disparity shift: + +- **(+)** Is faster, as it doesn't require an extra computation, which means there's also no extra latency +- **(-)** Reduces the MaxZ (significantly), while extended disparity only reduces MinZ. + +Disparity shift can be combined with extended disparity. + +.. doxygenfunction:: dai::StereoDepthConfig::setDisparityShift + :project: depthai-core + :no-link: + +Max stereo depth distance +========================= + +The maximum depth perception distance depends on the :ref:`accuracy of the depth perception `. The formula used to calculate this distance is an approximation, but is as follows: + +.. code-block:: python + + Dm = (baseline/2) * tan((90 - HFOV / HPixels)*pi/180) + +So using this formula for existing models the *theoretical* max distance is: + +.. 
code-block:: python + + # For OAK-D (7.5cm baseline) + Dm = (7.5/2) * tan((90 - 71.9/1280)*pi/180) = 3825.03cm = 38.25 meters + + # For OAK-D-CM4 (9cm baseline) + Dm = (9/2) * tan((90 - 71.9/1280)*pi/180) = 4590.04cm = 45.9 meters + +If greater precision for long range measurements is required, consider enabling Subpixel Disparity or using a larger baseline distance between mono cameras. For a custom baseline, you could consider using `OAK-FFC `__ device or design your own baseboard PCB with required baseline. For more information see Subpixel Disparity under the Stereo Mode tab in :ref:`this table `. + +Depth perception accuracy +========================= + +Disparity depth works by matching features from one image to the other and its accuracy is based on multiple parameters: + +* Texture of objects / backgrounds + +Backgrounds may interfere with the object detection, since backgrounds are objects too, which will make depth perception less accurate. So disparity depth works very well outdoors as there are very rarely perfectly-clean/blank surfaces there - but these are relatively commonplace indoors (in clean buildings at least). + +* Lighting + +If the illumination is low, the diparity map will be of low confidence, which will result in a noisy depth map. + +* Baseline / distance to objects + +Lower baseline enables us to detect the depth at a closer distance as long as the object is visible in both the frames. However, this reduces the accuracy for large distances due to less pixels representing the object and disparity decreasing towards 0 much faster. +So the common norm is to adjust the baseline according to how far/close we want to be able to detect objects. + +Limitation +========== + +Since depth is calculated from disparity, which requires the pixels to overlap, there is inherently a vertical +band on the left side of the left mono camera and on the right side of the right mono camera, where depth +cannot be calculated, since it is seen by only 1 camera. 
That band is marked with :code:`B` +on the following picture. + +.. image:: https://user-images.githubusercontent.com/59799831/135310921-67726c28-07e7-4ffa-bc8d-74861049517e.png + +Meaning of variables on the picture: + +- ``BL [cm]`` - Baseline of stereo cameras. +- ``Dv [cm]`` - Minimum distance where both cameras see an object (thus where depth can be calculated). +- ``B [pixels]`` - Width of the band where depth cannot be calculated. +- ``W [pixels]`` - Width of mono camera in pixels, or amount of horizontal pixels, also noted as :code:`HPixels` in other formulas. +- ``D [cm]`` - Distance from the **camera plane** to an object (see image :ref:`here `). +- ``F [cm]`` - Width of image at the distance ``D``. + +.. image:: https://user-images.githubusercontent.com/59799831/135310972-c37ba40b-20ad-4967-92a7-c71078bcef99.png + +With the use of the :code:`tan` function, the following formulas can be obtained: + +- :code:`F = 2 * D * tan(HFOV/2)` +- :code:`Dv = (BL/2) * tan(90 - HFOV/2)` + +In order to obtain :code:`B`, we can use :code:`tan` function again (same as for :code:`F`), but this time +we must also multiply it by the ratio between :code:`W` and :code:`F` in order to convert units to pixels. +That gives the following formula: + +.. code-block:: python + + B = 2 * Dv * tan(HFOV/2) * W / F + B = 2 * Dv * tan(HFOV/2) * W / (2 * D * tan(HFOV/2)) + B = W * Dv / D # pixels + +Example: If we are using OAK-D, which has a :code:`HFOV` of 72°, a baseline (:code:`BL`) of 7.5 cm and +:code:`640x400 (400P)` resolution is used, therefore :code:`W = 640` and an object is :code:`D = 100` cm away, we can +calculate :code:`B` in the following way: + +.. code-block:: + + Dv = 7.5 / 2 * tan(90 - 72/2) = 3.75 * tan(54°) = 5.16 cm + B = 640 * 5.16 / 100 = 33 # pixels + +Credit for calculations and images goes to our community member gregflurry, who made them on +`this `__ +forum post. + +.. 
note:: + + OAK-D-PRO will include both IR dot projector and IR LED, which will enable operation in no light. + IR LED is used to illuminate the whole area (for mono/color frames), while IR dot projector is mostly + for accurate disparity matching - to have good quality depth maps on blank surfaces as well. For outdoors, + the IR laser dot projector is only relevant at night. For more information see the development progress + `here `__. + Measuring real-world object dimensions ====================================== diff --git a/docs/source/samples/host_side/device_information.rst b/docs/source/samples/host_side/device_information.rst index 265c5c7f7..d19302bef 100644 --- a/docs/source/samples/host_side/device_information.rst +++ b/docs/source/samples/host_side/device_information.rst @@ -24,7 +24,7 @@ Demo Found device '192.168.33.192', MxId: '1844301011F4C51200', State: 'BOOTLOADER' Booting the first available camera (1.3)... - Available camera sensors: {: 'OV9282', : 'IMX378', : 'OV9282'} + Available camera sensors: {: 'OV9282', : 'IMX378', : 'OV9282'} Product name: OAK-D Pro AF, board name DM9098 diff --git a/examples/AprilTag/apriltag.py b/examples/AprilTag/apriltag.py index 2141ca132..bb41d5e70 100755 --- a/examples/AprilTag/apriltag.py +++ b/examples/AprilTag/apriltag.py @@ -19,7 +19,7 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") aprilTag.initialConfig.setFamily(dai.AprilTagConfig.Family.TAG_36H11) diff --git a/examples/AprilTag/apriltag_rgb.py b/examples/AprilTag/apriltag_rgb.py index 5d2858749..fa4f5ad31 100755 --- a/examples/AprilTag/apriltag_rgb.py +++ b/examples/AprilTag/apriltag_rgb.py @@ -20,7 +20,7 @@ # Properties camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) manip.initialConfig.setResize(480, 
270) manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8) diff --git a/examples/ColorCamera/rgb_isp_scale.py b/examples/ColorCamera/rgb_isp_scale.py index 78f5370a8..fd8d725ea 100755 --- a/examples/ColorCamera/rgb_isp_scale.py +++ b/examples/ColorCamera/rgb_isp_scale.py @@ -13,7 +13,7 @@ xoutVideo.setStreamName("video") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) camRgb.setIspScale(1, 2) camRgb.setVideoSize(1920, 1080) diff --git a/examples/ColorCamera/rgb_undistort.py b/examples/ColorCamera/rgb_undistort.py index 5111bbca6..97e71b184 100755 --- a/examples/ColorCamera/rgb_undistort.py +++ b/examples/ColorCamera/rgb_undistort.py @@ -3,7 +3,7 @@ import numpy as np camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P -camSocket = dai.CameraBoardSocket.RGB +camSocket = dai.CameraBoardSocket.CAM_A ispScale = (1,2) def getMesh(calibData, ispSize): diff --git a/examples/ColorCamera/rgb_video.py b/examples/ColorCamera/rgb_video.py index c808e333b..66a32d2ae 100755 --- a/examples/ColorCamera/rgb_video.py +++ b/examples/ColorCamera/rgb_video.py @@ -13,7 +13,7 @@ xoutVideo.setStreamName("video") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setVideoSize(1920, 1080) diff --git a/examples/EdgeDetector/edge_detector.py b/examples/EdgeDetector/edge_detector.py index db699e404..b23dfd1a3 100755 --- a/examples/EdgeDetector/edge_detector.py +++ b/examples/EdgeDetector/edge_detector.py @@ -32,13 +32,13 @@ xinEdgeCfg.setStreamName(edgeCfgStr) # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) 
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight()) diff --git a/examples/FeatureTracker/feature_detector.py b/examples/FeatureTracker/feature_detector.py index c9a5642cc..ebad14411 100755 --- a/examples/FeatureTracker/feature_detector.py +++ b/examples/FeatureTracker/feature_detector.py @@ -27,9 +27,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Disable optical flow featureTrackerLeft.initialConfig.setMotionEstimator(False) diff --git a/examples/FeatureTracker/feature_tracker.py b/examples/FeatureTracker/feature_tracker.py index 61ddf4ce7..4be337e91 100755 --- a/examples/FeatureTracker/feature_tracker.py +++ b/examples/FeatureTracker/feature_tracker.py @@ -94,9 +94,9 @@ def __init__(self, trackbarName, windowName): # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Linking monoLeft.out.link(featureTrackerLeft.inputImage) diff --git a/examples/ImageManip/image_manip_rotate.py b/examples/ImageManip/image_manip_rotate.py index f986919e4..37b09733d 100755 --- a/examples/ImageManip/image_manip_rotate.py +++ b/examples/ImageManip/image_manip_rotate.py @@ 
-27,7 +27,7 @@ # Rotate mono frames monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") manipLeft = pipeline.create(dai.node.ImageManip) rr = dai.RotatedRect() diff --git a/examples/MobileNet/mono_mobilenet.py b/examples/MobileNet/mono_mobilenet.py index 635dfc6af..49ccae728 100755 --- a/examples/MobileNet/mono_mobilenet.py +++ b/examples/MobileNet/mono_mobilenet.py @@ -33,7 +33,7 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Convert the grayscale frame into the nn-acceptable form diff --git a/examples/MonoCamera/mono_camera_control.py b/examples/MonoCamera/mono_camera_control.py index 059ae8c66..b70c59441 100755 --- a/examples/MonoCamera/mono_camera_control.py +++ b/examples/MonoCamera/mono_camera_control.py @@ -48,8 +48,8 @@ def clamp(num, v0, v1): bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) manipRight.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) diff --git a/examples/MonoCamera/mono_full_resolution_saver.py b/examples/MonoCamera/mono_full_resolution_saver.py index 42a489361..06e263418 100755 --- a/examples/MonoCamera/mono_full_resolution_saver.py +++ b/examples/MonoCamera/mono_full_resolution_saver.py @@ -15,7 +15,7 @@ xoutRight.setStreamName("right") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") 
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Linking diff --git a/examples/MonoCamera/mono_preview.py b/examples/MonoCamera/mono_preview.py index 204164609..2a7936d5b 100755 --- a/examples/MonoCamera/mono_preview.py +++ b/examples/MonoCamera/mono_preview.py @@ -16,9 +16,9 @@ xoutRight.setStreamName('right') # Properties -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Linking diff --git a/examples/MonoCamera/mono_preview_alternate_pro.py b/examples/MonoCamera/mono_preview_alternate_pro.py index 826d5d927..6f5997150 100755 --- a/examples/MonoCamera/mono_preview_alternate_pro.py +++ b/examples/MonoCamera/mono_preview_alternate_pro.py @@ -19,11 +19,11 @@ monoL = pipeline.create(dai.node.MonoCamera) monoR = pipeline.create(dai.node.MonoCamera) -monoL.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoL.setCamera("left") monoL.setResolution(res) monoL.setFps(fps) monoL.setNumFramesPool(poolSize) -monoR.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoR.setCamera("right") monoR.setResolution(res) monoR.setFps(fps) monoR.setNumFramesPool(poolSize) diff --git a/examples/NeuralNetwork/concat_multi_input.py b/examples/NeuralNetwork/concat_multi_input.py index 93e9d03d0..08b65a5f7 100755 --- a/examples/NeuralNetwork/concat_multi_input.py +++ b/examples/NeuralNetwork/concat_multi_input.py @@ -42,8 +42,8 @@ def create_mono(p, socket): nn.setNumInferenceThreads(2) camRgb.preview.link(nn.inputs['img2']) -create_mono(p, dai.CameraBoardSocket.LEFT).link(nn.inputs['img1']) -create_mono(p, dai.CameraBoardSocket.RIGHT).link(nn.inputs['img3']) +create_mono(p, dai.CameraBoardSocket.CAM_B).link(nn.inputs['img1']) +create_mono(p, dai.CameraBoardSocket.CAM_C).link(nn.inputs['img3']) # Send 
bouding box from the NN to the host via XLink nn_xout = p.createXLinkOut() diff --git a/examples/ObjectTracker/spatial_object_tracker.py b/examples/ObjectTracker/spatial_object_tracker.py index a181563cc..09053b09b 100755 --- a/examples/ObjectTracker/spatial_object_tracker.py +++ b/examples/ObjectTracker/spatial_object_tracker.py @@ -43,14 +43,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) spatialDetectionNetwork.setBlobPath(args.nnPath) diff --git a/examples/Script/script_change_pipeline_flow.py b/examples/Script/script_change_pipeline_flow.py index 3774faf50..b528b99d7 100755 --- a/examples/Script/script_change_pipeline_flow.py +++ b/examples/Script/script_change_pipeline_flow.py @@ -10,7 +10,7 @@ pipeline = dai.Pipeline() cam = pipeline.createColorCamera() -cam.setBoardSocket(dai.CameraBoardSocket.RGB) +cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setInterleaved(False) cam.setIspScale(2,3) cam.setVideoSize(720,720) diff --git a/examples/SpatialDetection/spatial_calculator_multi_roi.py b/examples/SpatialDetection/spatial_calculator_multi_roi.py index dbf7c6fce..a2fbb6c9f 100755 --- a/examples/SpatialDetection/spatial_calculator_multi_roi.py +++ b/examples/SpatialDetection/spatial_calculator_multi_roi.py @@ -23,9 +23,9 @@ # Properties 
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) stereo.setLeftRightCheck(True) diff --git a/examples/SpatialDetection/spatial_location_calculator.py b/examples/SpatialDetection/spatial_location_calculator.py index 8e30f0a3c..6642e8865 100755 --- a/examples/SpatialDetection/spatial_location_calculator.py +++ b/examples/SpatialDetection/spatial_location_calculator.py @@ -26,9 +26,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") lrcheck = False subpixel = False diff --git a/examples/SpatialDetection/spatial_mobilenet.py b/examples/SpatialDetection/spatial_mobilenet.py index ec2eff715..08e574a61 100755 --- a/examples/SpatialDetection/spatial_mobilenet.py +++ b/examples/SpatialDetection/spatial_mobilenet.py @@ -52,14 +52,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) 
+stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) spatialDetectionNetwork.setBlobPath(nnBlobPath) diff --git a/examples/SpatialDetection/spatial_mobilenet_mono.py b/examples/SpatialDetection/spatial_mobilenet_mono.py index 30a237d2f..9b3a5ac5a 100755 --- a/examples/SpatialDetection/spatial_mobilenet_mono.py +++ b/examples/SpatialDetection/spatial_mobilenet_mono.py @@ -53,9 +53,9 @@ imageManip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # StereoDepth stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/SpatialDetection/spatial_tiny_yolo.py b/examples/SpatialDetection/spatial_tiny_yolo.py index 5575bccd6..2e1887dd6 100755 --- a/examples/SpatialDetection/spatial_tiny_yolo.py +++ b/examples/SpatialDetection/spatial_tiny_yolo.py @@ -75,14 +75,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) 
spatialDetectionNetwork.setBlobPath(nnBlobPath) diff --git a/examples/StereoDepth/depth_colormap.py b/examples/StereoDepth/depth_colormap.py index 05d86f85c..26de44431 100755 --- a/examples/StereoDepth/depth_colormap.py +++ b/examples/StereoDepth/depth_colormap.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_crop_control.py b/examples/StereoDepth/depth_crop_control.py index eae2677a4..0eecf7c53 100755 --- a/examples/StereoDepth/depth_crop_control.py +++ b/examples/StereoDepth/depth_crop_control.py @@ -32,8 +32,8 @@ bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) diff --git a/examples/StereoDepth/depth_post_processing.py b/examples/StereoDepth/depth_post_processing.py index b1844b1ca..732e91232 100755 --- a/examples/StereoDepth/depth_post_processing.py +++ b/examples/StereoDepth/depth_post_processing.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) 
+monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview.py b/examples/StereoDepth/depth_preview.py index 017d3a403..b916ecb55 100755 --- a/examples/StereoDepth/depth_preview.py +++ b/examples/StereoDepth/depth_preview.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview_lr.py b/examples/StereoDepth/depth_preview_lr.py index 5cc37c063..8bf6fff6d 100755 --- a/examples/StereoDepth/depth_preview_lr.py +++ b/examples/StereoDepth/depth_preview_lr.py @@ -51,7 +51,7 @@ # Properties left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setIspScale(2, 3) center.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) @@ -59,7 +59,7 @@ center.setIspScale(2, 3) right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setIspScale(2, 3) LC_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/rgb_depth_aligned.py b/examples/StereoDepth/rgb_depth_aligned.py index c76dd7b79..3e72fc023 100755 --- a/examples/StereoDepth/rgb_depth_aligned.py +++ 
b/examples/StereoDepth/rgb_depth_aligned.py @@ -48,7 +48,7 @@ def updateBlendWeights(percent_rgb): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -56,22 +56,22 @@ def updateBlendWeights(percent_rgb): # This value was used during calibration try: calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) # Linking camRgb.isp.link(rgbOut.input) diff --git a/examples/StereoDepth/rgb_depth_confidence_aligned.py b/examples/StereoDepth/rgb_depth_confidence_aligned.py index e1487c9ac..226707a81 100755 --- a/examples/StereoDepth/rgb_depth_confidence_aligned.py +++ b/examples/StereoDepth/rgb_depth_confidence_aligned.py @@ -68,7 +68,7 @@ def updateConfBlendWeights(percent): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -76,24 +76,24 @@ def updateConfBlendWeights(percent): # This value was used during calibration try: 
calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) if 0: stereo.setSubpixel(True) # TODO enable for test -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) xoutConfMap = pipeline.create(dai.node.XLinkOut) xoutConfMap.setStreamName('confidence_map') diff --git a/examples/StereoDepth/stereo_depth_from_host.py b/examples/StereoDepth/stereo_depth_from_host.py index d6082df2e..bf5328785 100755 --- a/examples/StereoDepth/stereo_depth_from_host.py +++ b/examples/StereoDepth/stereo_depth_from_host.py @@ -281,7 +281,7 @@ def trackbarDisparityShift(value): for tr in StereoConfigHandler.trDisparityShift: tr.set(value) - def trackbarCenterAlignmentShift(value): + def trackbarCenterAlignmentShift(value): if StereoConfigHandler.config.algorithmControl.depthAlign != dai.StereoDepthConfig.AlgorithmControl.DepthAlign.CENTER: print("Center alignment shift factor requires CENTER alignment enabled!") return @@ -641,7 +641,7 @@ def convertToCv2Frame(name, image, config): stereoDepthConfigInQueue = device.getInputQueue("stereoDepthConfig") inStreams = ['in_left', 'in_right'] - inStreamsCameraID = [dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RIGHT] + inStreamsCameraID = [dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C] in_q_list = [] for s in inStreams: q = device.getInputQueue(s) diff --git 
a/examples/StereoDepth/stereo_depth_video.py b/examples/StereoDepth/stereo_depth_video.py index 8ee3a2269..6d909782a 100755 --- a/examples/StereoDepth/stereo_depth_video.py +++ b/examples/StereoDepth/stereo_depth_video.py @@ -121,11 +121,11 @@ def getMesh(calibData): - M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, resolution[0], resolution[1])) - d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution[0], resolution[1])) + d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) R1 = np.array(calibData.getStereoLeftRectificationRotation()) - M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, resolution[0], resolution[1])) - d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution[0], resolution[1])) + d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) R2 = np.array(calibData.getStereoRightRectificationRotation()) mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, resolution, cv2.CV_32FC1) mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, R2, M2, resolution, cv2.CV_32FC1) @@ -205,11 +205,11 @@ def getDisparityFrame(frame, cvColorMap): xoutRectifRight = pipeline.create(dai.node.XLinkOut) if args.swap_left_right: - camLeft.setBoardSocket(dai.CameraBoardSocket.RIGHT) - camRight.setBoardSocket(dai.CameraBoardSocket.LEFT) + camLeft.setCamera("right") + camRight.setCamera("left") else: - camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) - camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) + camLeft.setCamera("left") + camRight.setCamera("right") res = ( dai.MonoCameraProperties.SensorResolution.THE_800_P diff --git a/examples/VideoEncoder/disparity_colormap_encoding.py b/examples/VideoEncoder/disparity_colormap_encoding.py index 
09d602b10..c0d2c8ce1 100755 --- a/examples/VideoEncoder/disparity_colormap_encoding.py +++ b/examples/VideoEncoder/disparity_colormap_encoding.py @@ -8,11 +8,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/disparity_encoding.py b/examples/VideoEncoder/disparity_encoding.py index 951e1c1b8..1e9483d4f 100755 --- a/examples/VideoEncoder/disparity_encoding.py +++ b/examples/VideoEncoder/disparity_encoding.py @@ -10,11 +10,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/encoding_max_limit.py b/examples/VideoEncoder/encoding_max_limit.py index b01f430ed..94c0b555d 100755 --- a/examples/VideoEncoder/encoding_max_limit.py +++ b/examples/VideoEncoder/encoding_max_limit.py @@ -22,10 +22,10 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) 
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Setting to 26fps will trigger error ve1.setDefaultProfilePreset(25, dai.VideoEncoderProperties.Profile.H264_MAIN) diff --git a/examples/VideoEncoder/rgb_encoding.py b/examples/VideoEncoder/rgb_encoding.py index 45b51f991..3d5eadcdd 100755 --- a/examples/VideoEncoder/rgb_encoding.py +++ b/examples/VideoEncoder/rgb_encoding.py @@ -13,7 +13,7 @@ xout.setStreamName('h265') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/VideoEncoder/rgb_full_resolution_saver.py b/examples/VideoEncoder/rgb_full_resolution_saver.py index 381784f46..d2c06640a 100755 --- a/examples/VideoEncoder/rgb_full_resolution_saver.py +++ b/examples/VideoEncoder/rgb_full_resolution_saver.py @@ -9,7 +9,7 @@ pipeline = dai.Pipeline() camRgb = pipeline.create(dai.node.ColorCamera) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) xoutRgb = pipeline.create(dai.node.XLinkOut) @@ -56,7 +56,7 @@ with open(fName, "wb") as f: f.write(qStill.get().getData()) print('Image saved to', fName) - + key = cv2.waitKey(1) if key == ord('q'): break diff --git a/examples/VideoEncoder/rgb_mono_encoding.py b/examples/VideoEncoder/rgb_mono_encoding.py index 09ec1c036..5aeffb92e 100755 --- a/examples/VideoEncoder/rgb_mono_encoding.py +++ b/examples/VideoEncoder/rgb_mono_encoding.py @@ -22,9 +22,9 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) 
-monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding ve1.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_MAIN) ve2.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/calibration/calibration_load.py b/examples/calibration/calibration_load.py index 9a2995a22..44e8996dc 100755 --- a/examples/calibration/calibration_load.py +++ b/examples/calibration/calibration_load.py @@ -28,10 +28,10 @@ # MonoCamera monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") # monoLeft.setFps(5.0) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # monoRight.setFps(5.0) # Linking diff --git a/examples/calibration/calibration_reader.py b/examples/calibration/calibration_reader.py index c49b93b59..9f3d82564 100755 --- a/examples/calibration/calibration_reader.py +++ b/examples/calibration/calibration_reader.py @@ -14,66 +14,66 @@ calibData = device.readCalibration() calibData.eepromToJsonFile(calibFile) - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) if "OAK-1" in calibData.getEepromData().boardName or "BW1093OAK" in calibData.getEepromData().boardName: - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 1280, 720)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 1280, 720)) print("RGB Camera 
resized intrinsics...") print(M_rgb) - D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RGB)) + D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_A)) print("RGB Distortion Coefficients...") [print(name + ": " + value) for (name, value) in zip(["k1", "k2", "p1", "p2", "k3", "k4", "k5", "k6", "s1", "s2", "s3", "s4", "τx", "τy"], [str(data) for data in D_rgb])] - print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}') + print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}') else: - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 3840, 2160)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 3840, 2160)) print("RGB Camera resized intrinsics... 3840 x 2160 ") print(M_rgb) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 4056, 3040 )) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 4056, 3040 )) print("RGB Camera resized intrinsics... 4056 x 3040 ") print(M_rgb) - M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.LEFT) + M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_B) print("LEFT Camera Default intrinsics...") print(M_left) print(width) print(height) - M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720)) + M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, 1280, 720)) print("LEFT Camera resized intrinsics... 
1280 x 720") print(M_left) - M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720)) + M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, 1280, 720)) print("RIGHT Camera resized intrinsics... 1280 x 720") print(M_right) - D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) print("LEFT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_left])] - D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) print("RIGHT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_right])] - print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.LEFT)}") + print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.CAM_B)}") R1 = np.array(calibData.getStereoLeftRectificationRotation()) R2 = np.array(calibData.getStereoRightRectificationRotation()) @@ -87,10 +87,10 @@ print("RIGHT Camera stereo rectification matrix...") print(H_right) - lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RIGHT)) + lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C)) print("Transformation matrix of where left Camera is W.R.T right Camera's optical center") print(lr_extrinsics) - l_rgb_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RGB)) + l_rgb_extrinsics = 
np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_A)) print("Transformation matrix of where left Camera is W.R.T RGB Camera's optical center") print(l_rgb_extrinsics) diff --git a/examples/host_side/opencv_support.py b/examples/host_side/opencv_support.py index f439c3fe3..ae98a9b02 100755 --- a/examples/host_side/opencv_support.py +++ b/examples/host_side/opencv_support.py @@ -16,7 +16,7 @@ # Properties camRgb.setPreviewSize(300, 300) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(True) camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) diff --git a/examples/host_side/queue_add_callback.py b/examples/host_side/queue_add_callback.py index e9c21d0f3..6feb194d0 100755 --- a/examples/host_side/queue_add_callback.py +++ b/examples/host_side/queue_add_callback.py @@ -17,9 +17,9 @@ # Properties camRgb.setPreviewSize(300, 300) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Stream all the camera streams through the same XLink node diff --git a/examples/mixed/frame_sync.py b/examples/mixed/frame_sync.py index 2fff31edc..29226e285 100755 --- a/examples/mixed/frame_sync.py +++ b/examples/mixed/frame_sync.py @@ -15,12 +15,12 @@ left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(FPS) right = pipeline.create(dai.node.MonoCamera) right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") 
right.setFps(FPS) stereo = pipeline.createStereoDepth() diff --git a/examples/mixed/mono_depth_mobilenetssd.py b/examples/mixed/mono_depth_mobilenetssd.py index 145b1f915..d7bc31127 100755 --- a/examples/mixed/mono_depth_mobilenetssd.py +++ b/examples/mixed/mono_depth_mobilenetssd.py @@ -38,9 +38,9 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Produce the depth map (using disparity output as it's easier to visualize depth this way) diff --git a/examples/mixed/multiple_devices.py b/examples/mixed/multiple_devices.py index 7c3252823..789694d63 100755 --- a/examples/mixed/multiple_devices.py +++ b/examples/mixed/multiple_devices.py @@ -11,7 +11,7 @@ def createPipeline(): camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) - camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) + camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(False) @@ -47,7 +47,7 @@ def createPipeline(): print(" >>> Board name:", eepromData.boardName) if eepromData.productName != "": print(" >>> Product name:", eepromData.productName) - + pipeline = createPipeline() device.startPipeline(pipeline) @@ -55,7 +55,7 @@ def createPipeline(): q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) stream_name = "rgb-" + mxId + "-" + eepromData.productName qRgbMap.append((q_rgb, stream_name)) - + while True: for q_rgb, stream_name in qRgbMap: if q_rgb.has(): diff --git a/examples/mixed/report_camera_settings.py b/examples/mixed/report_camera_settings.py index 25b1780fc..78283f09d 100755 --- a/examples/mixed/report_camera_settings.py +++ 
b/examples/mixed/report_camera_settings.py @@ -16,7 +16,7 @@ camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +camLeft.setCamera("left") xoutLeft = pipeline.create(dai.node.XLinkOut) xoutLeft.setStreamName("left") diff --git a/examples/mixed/rgb_encoding_mobilenet.py b/examples/mixed/rgb_encoding_mobilenet.py index fbcb10192..bf00c8bb8 100755 --- a/examples/mixed/rgb_encoding_mobilenet.py +++ b/examples/mixed/rgb_encoding_mobilenet.py @@ -36,7 +36,7 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setPreviewSize(300, 300) camRgb.setInterleaved(False) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet.py b/examples/mixed/rgb_encoding_mono_mobilenet.py index f4822ab0e..850635b21 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet.py @@ -40,9 +40,9 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py index 5b08ce42f..f6a0f627d 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py @@ -44,11 +44,11 @@ nnOut.setStreamName('nn') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) 
+camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rotated_spatial_detections.py b/examples/mixed/rotated_spatial_detections.py index 3b386a48b..90913d84d 100755 --- a/examples/mixed/rotated_spatial_detections.py +++ b/examples/mixed/rotated_spatial_detections.py @@ -50,14 +50,14 @@ camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) rotate_stereo_manip = pipeline.createImageManip() diff --git a/src/CalibrationHandlerBindings.cpp b/src/CalibrationHandlerBindings.cpp index 734692c07..52f7bf3ae 100644 --- a/src/CalibrationHandlerBindings.cpp +++ b/src/CalibrationHandlerBindings.cpp @@ -47,7 +47,7 @@ void CalibrationHandlerBindings::bind(pybind11::module& m, void* pCallstack){ .def("getCameraExtrinsics", &CalibrationHandler::getCameraExtrinsics, py::arg("srcCamera"), 
py::arg("dstCamera"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getCameraExtrinsics)) .def("getCameraTranslationVector", &CalibrationHandler::getCameraTranslationVector, py::arg("srcCamera"), py::arg("dstCamera"), py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getCameraTranslationVector)) - .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::RIGHT, py::arg("cam2") = dai::CameraBoardSocket::LEFT, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) + .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::CAM_C, py::arg("cam2") = dai::CameraBoardSocket::CAM_B, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) .def("getCameraToImuExtrinsics", &CalibrationHandler::getCameraToImuExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getCameraToImuExtrinsics)) .def("getImuToCameraExtrinsics", &CalibrationHandler::getImuToCameraExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getImuToCameraExtrinsics)) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 18e6c3fc7..6d04596e7 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -109,10 +109,10 @@ def socket_type_pair(arg): print("DepthAI path:", dai.__file__) cam_socket_opts = { - 'rgb': dai.CameraBoardSocket.RGB, # Or CAM_A - 'left': dai.CameraBoardSocket.LEFT, # Or CAM_B - 'right': dai.CameraBoardSocket.RIGHT, # Or CAM_C - 'camd': dai.CameraBoardSocket.CAM_D, + 'rgb' : dai.CameraBoardSocket.CAM_A, + 'left' : dai.CameraBoardSocket.CAM_B, + 'right': dai.CameraBoardSocket.CAM_C, + 'camd' : dai.CameraBoardSocket.CAM_D, } cam_socket_to_name = { From 2aa41fba27e3b16426c2401846dbeee50e909e7a Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 23 Apr 2023 15:26:06 +0200 
Subject: [PATCH 20/66] Added deprecation of usb2Mode constructors --- src/DeviceBindings.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 673bac270..cb0990572 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -219,6 +219,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev); }), py::arg("pipeline"), DOC(dai, DeviceBase, DeviceBase)) .def(py::init([](const Pipeline& pipeline, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(pipeline, dev, usb2Mode); @@ -234,6 +235,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev, pathToCmd); }), py::arg("pipeline"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 4)) .def(py::init([](const Pipeline& pipeline, const DeviceInfo& deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(pipeline, deviceInfo, usb2Mode); }), py::arg("pipeline"), py::arg("devInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 6)) @@ -253,6 +255,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(version, dev); }), py::arg("version") = OpenVINO::VERSION_UNIVERSAL, DOC(dai, DeviceBase, DeviceBase, 10)) .def(py::init([](OpenVINO::Version version, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(version, dev, usb2Mode); @@ -268,6 +271,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(version, dev, pathToCmd); }), py::arg("version"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 13)) .def(py::init([](OpenVINO::Version version, const DeviceInfo& 
deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(version, deviceInfo, usb2Mode); }), py::arg("version"), py::arg("deviceInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 15)) From 3d4440b04d2a6b65073fbd2aecaa19af95ad80e4 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 23 Apr 2023 15:26:17 +0200 Subject: [PATCH 21/66] Added a comment to deprecated values --- src/pipeline/CommonBindings.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index abb40023c..d4be59051 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -131,10 +131,6 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ // CameraBoardSocket enum bindings cameraBoardSocket .value("AUTO", CameraBoardSocket::AUTO) - .value("RGB", CameraBoardSocket::RGB) - .value("LEFT", CameraBoardSocket::LEFT) - .value("RIGHT", CameraBoardSocket::RIGHT) - .value("CENTER", CameraBoardSocket::CENTER) .value("CAM_A", CameraBoardSocket::CAM_A) .value("CAM_B", CameraBoardSocket::CAM_B) .value("CAM_C", CameraBoardSocket::CAM_C) @@ -143,6 +139,13 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("CAM_F", CameraBoardSocket::CAM_F) .value("CAM_G", CameraBoardSocket::CAM_G) .value("CAM_H", CameraBoardSocket::CAM_H) + + // Deprecated + // TODO(themarpe) - issue a Deprecation warning + .value("RGB", CameraBoardSocket::RGB, "**Deprecated:** Use CAM_A or address camera by name instead") + .value("LEFT", CameraBoardSocket::LEFT, "**Deprecated:** Use CAM_B or address camera by name instead") + .value("RIGHT", CameraBoardSocket::RIGHT, "**Deprecated:** Use CAM_C or address camera by name instead") + .value("CENTER", CameraBoardSocket::CENTER, "**Deprecated:** Use CAM_A or address camera by name instead") ; // CameraSensorType 
enum bindings From 2f0f17ac463ef8dcdb9b966eb5b6d19b29213f5d Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 23 Apr 2023 15:26:29 +0200 Subject: [PATCH 22/66] Fixed deprecation for CameraBoardSocket --- src/pipeline/CommonBindings.cpp | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index d4be59051..9469d4156 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -1,5 +1,8 @@ #include "CommonBindings.hpp" +// Libraries +#include "hedley/hedley.h" + // depthai-shared #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/EepromData.hpp" @@ -141,11 +144,31 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("CAM_H", CameraBoardSocket::CAM_H) // Deprecated - // TODO(themarpe) - issue a Deprecation warning + HEDLEY_DIAGNOSTIC_PUSH + HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED .value("RGB", CameraBoardSocket::RGB, "**Deprecated:** Use CAM_A or address camera by name instead") .value("LEFT", CameraBoardSocket::LEFT, "**Deprecated:** Use CAM_B or address camera by name instead") .value("RIGHT", CameraBoardSocket::RIGHT, "**Deprecated:** Use CAM_C or address camera by name instead") .value("CENTER", CameraBoardSocket::CENTER, "**Deprecated:** Use CAM_A or address camera by name instead") + + // Deprecated overriden + .def_property_readonly_static("RGB", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RGB is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("CENTER", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "CENTER is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("LEFT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "LEFT is deprecated, use CAM_B or address camera by name 
instead.", 1); + return CameraBoardSocket::CAM_B; + }) + .def_property_readonly_static("RIGHT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RIGHT is deprecated, use CAM_C or address camera by name instead.", 1); + return CameraBoardSocket::CAM_C; + }) + HEDLEY_DIAGNOSTIC_POP ; // CameraSensorType enum bindings From 6ccf5b5c13ca1ae3b822278f0f7ba2622bb6109f Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Sun, 23 Apr 2023 17:22:58 +0200 Subject: [PATCH 23/66] Fixed Hedley usage --- src/pipeline/CommonBindings.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 9469d4156..4c3d4d36f 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -132,6 +132,10 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ ; // CameraBoardSocket enum bindings + + // Deprecated + HEDLEY_DIAGNOSTIC_PUSH + HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED cameraBoardSocket .value("AUTO", CameraBoardSocket::AUTO) .value("CAM_A", CameraBoardSocket::CAM_A) @@ -143,9 +147,6 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("CAM_G", CameraBoardSocket::CAM_G) .value("CAM_H", CameraBoardSocket::CAM_H) - // Deprecated - HEDLEY_DIAGNOSTIC_PUSH - HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED .value("RGB", CameraBoardSocket::RGB, "**Deprecated:** Use CAM_A or address camera by name instead") .value("LEFT", CameraBoardSocket::LEFT, "**Deprecated:** Use CAM_B or address camera by name instead") .value("RIGHT", CameraBoardSocket::RIGHT, "**Deprecated:** Use CAM_C or address camera by name instead") @@ -168,8 +169,8 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ PyErr_WarnEx(PyExc_DeprecationWarning, "RIGHT is deprecated, use CAM_C or address camera by name instead.", 1); return CameraBoardSocket::CAM_C; }) - HEDLEY_DIAGNOSTIC_POP ; + HEDLEY_DIAGNOSTIC_POP // CameraSensorType enum bindings cameraSensorType From 
4cac921ad3520b29cd85730a8b6bf8ce4dd73962 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Sun, 23 Apr 2023 18:47:25 +0200 Subject: [PATCH 24/66] Added more checks when parsing message and a test --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index dbb0f95bd..a24e90707 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit dbb0f95bd0fff6ef5111c7c1440b214067e0102d +Subproject commit a24e9070703c99a7df226696d3d5d806452946d4 From 091845ebf2b3ca82c51ee7d50eea3e45cb11ca76 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Mon, 24 Apr 2023 15:25:33 +0200 Subject: [PATCH 25/66] Add get/set to all config messages --- depthai-core | 2 +- src/pipeline/datatype/CameraControlBindings.cpp | 2 ++ src/pipeline/datatype/EdgeDetectorConfigBindings.cpp | 2 ++ src/pipeline/datatype/ImageManipConfigBindings.cpp | 2 ++ .../datatype/SpatialLocationCalculatorConfigBindings.cpp | 2 ++ 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index dbb0f95bd..cb80458e7 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit dbb0f95bd0fff6ef5111c7c1440b214067e0102d +Subproject commit cb80458e799923f4ac749ef34c7f69326a32f02a diff --git a/src/pipeline/datatype/CameraControlBindings.cpp b/src/pipeline/datatype/CameraControlBindings.cpp index 9fb47a62d..06ebb4ab4 100644 --- a/src/pipeline/datatype/CameraControlBindings.cpp +++ b/src/pipeline/datatype/CameraControlBindings.cpp @@ -214,11 +214,13 @@ std::vector camCtrlAttr; .def("setChromaDenoise", &CameraControl::setChromaDenoise, py::arg("value"), DOC(dai, CameraControl, setChromaDenoise)) .def("setSceneMode", &CameraControl::setSceneMode, py::arg("mode"), DOC(dai, CameraControl, setSceneMode)) .def("setEffectMode", &CameraControl::setEffectMode, py::arg("mode"), DOC(dai, CameraControl, setEffectMode)) + .def("set", &CameraControl::set, py::arg("config"), DOC(dai, CameraControl, set)) // getters 
.def("getCaptureStill", &CameraControl::getCaptureStill, DOC(dai, CameraControl, getCaptureStill)) .def("getExposureTime", &CameraControl::getExposureTime, DOC(dai, CameraControl, getExposureTime)) .def("getSensitivity", &CameraControl::getSensitivity, DOC(dai, CameraControl, getSensitivity)) .def("getLensPosition", &CameraControl::getLensPosition, DOC(dai, CameraControl, getLensPosition)) + .def("get", &CameraControl::get, DOC(dai, CameraControl, get)) ; // Add also enum attributes from RawCameraControl for (const auto& a : camCtrlAttr) { diff --git a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp index fd5e39a11..eaefb6825 100644 --- a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp +++ b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp @@ -50,6 +50,8 @@ void bind_edgedetectorconfig(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def("setSobelFilterKernels", &EdgeDetectorConfig::setSobelFilterKernels, py::arg("horizontalKernel"), py::arg("verticalKernel"), DOC(dai, EdgeDetectorConfig, setSobelFilterKernels)) .def("getConfigData", &EdgeDetectorConfig::getConfigData, DOC(dai, EdgeDetectorConfig, getConfigData)) + .def("get", &EdgeDetectorConfig::get, DOC(dai, EdgeDetectorConfig, get)) + .def("set", &EdgeDetectorConfig::set, py::arg("config"), DOC(dai, EdgeDetectorConfig, set)) ; } diff --git a/src/pipeline/datatype/ImageManipConfigBindings.cpp b/src/pipeline/datatype/ImageManipConfigBindings.cpp index e94ced202..694912999 100644 --- a/src/pipeline/datatype/ImageManipConfigBindings.cpp +++ b/src/pipeline/datatype/ImageManipConfigBindings.cpp @@ -120,6 +120,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("setReusePreviousImage", &ImageManipConfig::setReusePreviousImage, py::arg("reuse"), DOC(dai, ImageManipConfig, setReusePreviousImage)) .def("setSkipCurrentImage", &ImageManipConfig::setSkipCurrentImage, py::arg("skip"), DOC(dai, ImageManipConfig, 
setSkipCurrentImage)) .def("setKeepAspectRatio", &ImageManipConfig::setKeepAspectRatio, py::arg("keep"), DOC(dai, ImageManipConfig, setKeepAspectRatio)) + .def("set", &ImageManipConfig::set, py::arg("config"), DOC(dai, ImageManipConfig, set)) // getters .def("getCropXMin", &ImageManipConfig::getCropXMin, DOC(dai, ImageManipConfig, getCropXMin)) @@ -133,6 +134,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("getFormatConfig", &ImageManipConfig::getFormatConfig, DOC(dai, ImageManipConfig, getFormatConfig)) .def("isResizeThumbnail", &ImageManipConfig::isResizeThumbnail, DOC(dai, ImageManipConfig, isResizeThumbnail)) .def("getColormap", &ImageManipConfig::getColormap, DOC(dai, ImageManipConfig, getColormap)) + .def("get", &ImageManipConfig::get, DOC(dai, ImageManipConfig, get)) ; diff --git a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp index 84b6a4851..7d56a7d5e 100644 --- a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp +++ b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp @@ -64,6 +64,8 @@ void bind_spatiallocationcalculatorconfig(pybind11::module& m, void* pCallstack) .def("setROIs", &SpatialLocationCalculatorConfig::setROIs, py::arg("ROIs"), DOC(dai, SpatialLocationCalculatorConfig, setROIs)) .def("addROI", &SpatialLocationCalculatorConfig::addROI, py::arg("ROI"), DOC(dai, SpatialLocationCalculatorConfig, addROI)) .def("getConfigData", &SpatialLocationCalculatorConfig::getConfigData, DOC(dai, SpatialLocationCalculatorConfig, getConfigData)) + .def("set", &SpatialLocationCalculatorConfig::set, py::arg("config"), DOC(dai, SpatialLocationCalculatorConfig, set)) + .def("get", &SpatialLocationCalculatorConfig::get, DOC(dai, SpatialLocationCalculatorConfig, get)) ; } From d92ed41e715c77c1ff7e04831122dbdb2f91a206 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Wed, 26 Apr 2023 20:26:36 +0200 Subject: [PATCH 26/66] 
Added bindings for some logging and profiling modifications --- depthai-core | 2 +- src/DeviceBindings.cpp | 8 ++++++-- src/XLinkBindings.cpp | 1 + src/pipeline/CommonBindings.cpp | 7 +++++++ src/pipeline/datatype/ImgFrameBindings.cpp | 2 +- 5 files changed, 16 insertions(+), 4 deletions(-) diff --git a/depthai-core b/depthai-core index cb80458e7..d27ad1e39 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit cb80458e799923f4ac749ef34c7f69326a32f02a +Subproject commit d27ad1e39528f2ecdd160d41feb5716790cb8309 diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index cb0990572..868322e43 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -482,13 +482,15 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("version", &Device::Config::version) .def_readwrite("board", &Device::Config::board) .def_readwrite("nonExclusiveMode", &Device::Config::nonExclusiveMode) + .def_readwrite("outputLogLevel", &Device::Config::outputLogLevel) + .def_readwrite("logLevel", &Device::Config::logLevel) ; // Bind CrashDump crashDump .def(py::init<>()) .def("serializeToJson", &CrashDump::serializeToJson, DOC(dai, CrashDump, serializeToJson)) - + .def_readwrite("crashReports", &CrashDump::crashReports, DOC(dai, CrashDump, crashReports)) .def_readwrite("depthaiCommitHash", &CrashDump::depthaiCommitHash, DOC(dai, CrashDump, depthaiCommitHash)) .def_readwrite("deviceId", &CrashDump::deviceId, DOC(dai, CrashDump, deviceId)) @@ -501,7 +503,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("crashedThreadId", &CrashDump::CrashReport::crashedThreadId, DOC(dai, CrashDump, CrashReport, crashedThreadId)) .def_readwrite("threadCallstack", &CrashDump::CrashReport::threadCallstack, DOC(dai, CrashDump, CrashReport, threadCallstack)) ; - + errorSourceInfo .def(py::init<>()) .def_readwrite("assertContext", &CrashDump::CrashReport::ErrorSourceInfo::assertContext, DOC(dai, CrashDump, CrashReport, 
ErrorSourceInfo, assertContext)) @@ -566,6 +568,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_static("getEmbeddedDeviceBinary", py::overload_cast(&DeviceBase::getEmbeddedDeviceBinary), py::arg("config"), DOC(dai, DeviceBase, getEmbeddedDeviceBinary, 2)) .def_static("getDeviceByMxId", &DeviceBase::getDeviceByMxId, py::arg("mxId"), DOC(dai, DeviceBase, getDeviceByMxId)) .def_static("getAllConnectedDevices", &DeviceBase::getAllConnectedDevices, DOC(dai, DeviceBase, getAllConnectedDevices)) + .def_static("getGlobalProfilingData", &DeviceBase::getGlobalProfilingData, DOC(dai, DeviceBase, getGlobalProfilingData)) // methods .def("getBootloaderVersion", &DeviceBase::getBootloaderVersion, DOC(dai, DeviceBase, getBootloaderVersion)) @@ -612,6 +615,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("getUsbSpeed", [](DeviceBase& d) { py::gil_scoped_release release; return d.getUsbSpeed(); }, DOC(dai, DeviceBase, getUsbSpeed)) .def("getDeviceInfo", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceInfo(); }, DOC(dai, DeviceBase, getDeviceInfo)) .def("getMxId", [](DeviceBase& d) { py::gil_scoped_release release; return d.getMxId(); }, DOC(dai, DeviceBase, getMxId)) + .def("getProfilingData", [](DeviceBase& d) { py::gil_scoped_release release; return d.getProfilingData(); }, DOC(dai, DeviceBase, getProfilingData)) .def("readCalibration", [](DeviceBase& d) { py::gil_scoped_release release; return d.readCalibration(); }, DOC(dai, DeviceBase, readCalibration)) .def("flashCalibration", [](DeviceBase& d, CalibrationHandler calibrationDataHandler) { py::gil_scoped_release release; return d.flashCalibration(calibrationDataHandler); }, py::arg("calibrationDataHandler"), DOC(dai, DeviceBase, flashCalibration)) .def("setXLinkChunkSize", [](DeviceBase& d, int s) { py::gil_scoped_release release; d.setXLinkChunkSize(s); }, py::arg("sizeBytes"), DOC(dai, DeviceBase, setXLinkChunkSize)) diff --git 
a/src/XLinkBindings.cpp b/src/XLinkBindings.cpp index 080a0deb8..bb5475fd3 100644 --- a/src/XLinkBindings.cpp +++ b/src/XLinkBindings.cpp @@ -137,6 +137,7 @@ void XLinkBindings::bind(pybind11::module &m, void *pCallstack) .def_static("getFirstDevice", &XLinkConnection::getFirstDevice, py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("getDeviceByMxId", &XLinkConnection::getDeviceByMxId, py::arg("mxId"), py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("bootBootloader", &XLinkConnection::bootBootloader, py::arg("devInfo")) + .def_static("getGlobalProfilingData", &XLinkConnection::getGlobalProfilingData, DOC(dai, XLinkConnection, getGlobalProfilingData)) ; xLinkError diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 4c3d4d36f..cca7e769d 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -28,6 +28,7 @@ // depthai #include "depthai/common/CameraFeatures.hpp" #include "depthai/common/CameraExposureOffset.hpp" +#include "depthai/utility/ProfilingData.hpp" void CommonBindings::bind(pybind11::module& m, void* pCallstack){ @@ -60,6 +61,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ cameraExposureOffset(m, "CameraExposureOffset"); py::enum_ colormap(m, "Colormap", DOC(dai, Colormap)); py::enum_ frameEvent(m, "FrameEvent", DOC(dai, FrameEvent)); + py::class_ profilingData(m, "ProfilingData", DOC(dai, ProfilingData)); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -375,4 +377,9 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("READOUT_END", FrameEvent::READOUT_END) ; + profilingData + .def_readwrite("numBytesWritten", &ProfilingData::numBytesWritten, DOC(dai, ProfilingData, numBytesWritten)) + .def_readwrite("numBytesRead", &ProfilingData::numBytesRead, DOC(dai, ProfilingData, 
numBytesRead)) + ; + } diff --git a/src/pipeline/datatype/ImgFrameBindings.cpp b/src/pipeline/datatype/ImgFrameBindings.cpp index bc2c8b768..b39b1e721 100644 --- a/src/pipeline/datatype/ImgFrameBindings.cpp +++ b/src/pipeline/datatype/ImgFrameBindings.cpp @@ -234,7 +234,7 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ + ", actual " + std::to_string(actualSize) + ". Maybe metadataOnly transfer was made?"); } else if(actualSize > requiredSize) { // FIXME check build on Windows - // spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); + // logger::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); } if(img.getWidth() <= 0 || img.getHeight() <= 0){ throw std::runtime_error("ImgFrame size invalid (width: " + std::to_string(img.getWidth()) + ", height: " + std::to_string(img.getHeight()) + ")"); From cadde475de5eaf6db97557d3e54ee65f01212628 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Thu, 27 Apr 2023 03:20:26 +0200 Subject: [PATCH 27/66] [DeviceManager] improvements & [FW] Updated for IMX296 support --- depthai-core | 2 +- utilities/device_manager.py | 181 ++++++++++++++++++++++-------------- 2 files changed, 113 insertions(+), 70 deletions(-) diff --git a/depthai-core b/depthai-core index d27ad1e39..3793cf5a3 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit d27ad1e39528f2ecdd160d41feb5716790cb8309 +Subproject commit 3793cf5a3a556322ed6b8dfbf004a75d65238eba diff --git a/utilities/device_manager.py b/utilities/device_manager.py index ea2cf44e9..99814f10b 100755 --- a/utilities/device_manager.py +++ b/utilities/device_manager.py @@ -10,6 +10,7 @@ from typing import Dict import platform import os +import numpy if USE_OPENCV: # import cv2 @@ -20,10 +21,14 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +PLATFORM_ICON_PATH = None if platform.system() == 'Windows': - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.ico') + PLATFORM_ICON_PATH = 
f'{SCRIPT_DIR}/assets/icon.ico' else: - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.png') + PLATFORM_ICON_PATH = f'{SCRIPT_DIR}/assets/icon.png' + +# Apply icon globally +sg.set_global_icon(PLATFORM_ICON_PATH) CONF_TEXT_POE = ['ipTypeText', 'ipText', 'maskText', 'gatewayText', 'dnsText', 'dnsAltText', 'networkTimeoutText', 'macText'] CONF_INPUT_POE = ['staticBut', 'dynamicBut', 'ip', 'mask', 'gateway', 'dns', 'dnsAlt', 'networkTimeout', 'mac'] @@ -38,27 +43,35 @@ def PrintException(): filename = f.f_code.co_filename print('Exception in {}, line {}; {}'.format(filename, lineno, exc_obj)) -def check_ip(s: str, req = True): +def Popup(msg, window): + main_window_location = window.CurrentLocation() + main_window_size = window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - 50) // 2, + main_window_location[1] + (main_window_size[1] - 50) // 2) + return sg.Popup(msg, location=centered_location) + +def check_ip(window, s: str, req = True): if s == "": return not req spl = s.split(".") if len(spl) != 4: - sg.Popup("Wrong IP format.\nValue should be similar to 255.255.255.255") + Popup("Wrong IP format.\nValue should be similar to 255.255.255.255", window=window) return False for num in spl: if 255 < int(num): - sg.Popup("Wrong IP format.\nValues can not be above 255!") + Popup("Wrong IP format.\nValues can not be above 255!", window=window) return False return True -def check_mac(s): +def check_mac(window, s): if s.count(":") != 5: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", window=window) return False for i in s.split(":"): for j in i: if j > "F" or (j < "A" and not j.isdigit()) or len(i) != 2: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", window=window) return False return 
True @@ -113,7 +126,7 @@ def wait(self): class SelectIP: - def __init__(self): + def __init__(self, window): self.ok = False layout = [ [sg.Text("Specify the custom IP of the OAK PoE\ncamera you want to connect to")], @@ -124,15 +137,24 @@ def __init__(self): [sg.Submit(), sg.Cancel()], ] self.window = sg.Window("Specify IP", layout, size=(300,110), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + def wait(self): event, values = self.window.Read() self.window.close() - if str(event) == "Cancel" or values is None or not check_ip(values["ip"]): + if str(event) == "Cancel" or values is None or not check_ip(self.window, values["ip"]): return False, "" return True, values["ip"] class SearchDevice: - def __init__(self): + def __init__(self, window): self.infos = [] layout = [ [sg.Text("Select an OAK camera you would like to connect to.", font=('Arial', 10, 'bold'))], @@ -156,12 +178,22 @@ def __init__(self): [sg.Button('Search', size=(15, 2), font=('Arial', 10, 'bold'))], ] self.window = sg.Window("Select Device", layout, size=(550,375), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + self.search_devices() def search_devices(self): self.infos = dai.XLinkConnection.getAllConnectedDevices() if not self.infos: - sg.Popup("No devices found.") + 
pass + # sg.Popup("No devices found.") else: rows = [] for info in self.infos: @@ -257,21 +289,20 @@ def factoryReset(device: dai.DeviceInfo, type: dai.DeviceBootloader.Type): def connectAndStartStreaming(dev): - # OpenCV - if USE_OPENCV: + with dai.Device(dev) as d: # Create pipeline pipeline = dai.Pipeline() + # OpenCV + if USE_OPENCV: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + videnc = pipeline.create(dai.node.VideoEncoder) + videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) + xout = pipeline.create(dai.node.XLinkOut) + xout.setStreamName("mjpeg") + camRgb.video.link(videnc.input) + videnc.bitstream.link(xout.input) - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - videnc = pipeline.create(dai.node.VideoEncoder) - videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) - xout = pipeline.create(dai.node.XLinkOut) - xout.setStreamName("mjpeg") - camRgb.video.link(videnc.input) - videnc.bitstream.link(xout.input) - - with dai.Device(pipeline, dev) as d: while not d.isClosed(): mjpeg = d.getOutputQueue('mjpeg').get() frame = cv2.imdecode(mjpeg.getData(), cv2.IMREAD_UNCHANGED) @@ -279,21 +310,22 @@ def connectAndStartStreaming(dev): if cv2.waitKey(1) == ord('q'): cv2.destroyWindow('Color Camera') break - else: - # Create pipeline (no opencv) - pipeline = dai.Pipeline() - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - camRgb.setPreviewSize(camRgb.getIspSize()) - camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) - - xout = pipeline.create(dai.node.XLinkOut) - xout.input.setQueueSize(2) - xout.input.setBlocking(False) - xout.setStreamName("color") - camRgb.preview.link(xout.input) - - with dai.Device(pipeline, dev) as d: + else: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + firstSensor = d.getConnectedCameraFeatures()[0] + camRgb.setPreviewSize(firstSensor.width // 3, firstSensor.height // 3) 
+ camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) + + xout = pipeline.create(dai.node.XLinkOut) + xout.input.setQueueSize(2) + xout.input.setBlocking(False) + xout.setStreamName("color") + camRgb.preview.link(xout.input) + + # Start pipeline + d.startPipeline(pipeline) + frame = d.getOutputQueue('color', 2, False).get() width, height = frame.getWidth(), frame.getHeight() @@ -540,7 +572,7 @@ class DeviceManager: def __init__(self) -> None: self.window = sg.Window(title="Device Manager", - icon="assets/icon.png", + icon=PLATFORM_ICON_PATH, layout=layout, size=(645, 380), finalize=True # So we can do First search for devices @@ -556,7 +588,7 @@ def isPoE(self) -> bool: return self.bl.getType() == dai.DeviceBootloader.Type.NETWORK except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', self.window) def isUsb(self) -> bool: return not self.isPoE() @@ -577,7 +609,7 @@ def run(self) -> None: device = self.device if deviceStateTxt(device.state) == "BOOTED": # device is already booted somewhere else - sg.Popup("Device is already booted somewhere else!") + Popup("Device is already booted somewhere else!", self.window) else: self.resetGui() self.bl = connectToDevice(device) @@ -588,7 +620,7 @@ def run(self) -> None: self.window.Element('progress').update("No device selected.") elif event == "Search": self.getDevices() # Re-search devices for dropdown - selDev = SearchDevice() + selDev = SearchDevice(window=self.window) di = selDev.wait() if di is not None: self.resetGui() @@ -601,7 +633,7 @@ def run(self) -> None: self.getConfigs() self.unlockConfig() elif event == "Specify IP": - select = SelectIP() + select = SelectIP(window=self.window) ok, ip = select.wait() if ok: self.resetGui() @@ -655,14 +687,14 @@ def run(self) -> None: print("Factory reset cancelled.") elif event == "Flash configuration": - self.flashConfig() - self.getConfigs() - self.resetGui() - if self.isUsb(): - self.unlockConfig() - else: - self.devices.clear() - 
self.window.Element('devices').update("Search for devices", values=[]) + if self.flashConfig() is not None: + self.getConfigs() + self.resetGui() + if self.isUsb(): + self.unlockConfig() + else: + self.devices.clear() + self.window.Element('devices').update("Search for devices", values=[]) elif event == "Clear configuration": self.clearConfig() self.getConfigs() @@ -677,7 +709,7 @@ def run(self) -> None: confJson = self.bl.readConfigData() sg.popup_scrolled(confJson, title='Configuration') except Exception as ex: - sg.popup(f'No existing config to view ({ex})') + Popup(f'No existing config to view ({ex})', self.window) elif event == "Flash application": file = sg.popup_get_file("Select .dap file", file_types=(('DepthAI Application Package', '*.dap'), ('All Files', '*.* *'))) @@ -685,9 +717,9 @@ def run(self) -> None: elif event == "Remove application": try: self.bl.flashClear() - sg.popup(f'Successfully removed application') + Popup(f'Successfully removed application', self.window) except Exception as ex: - sg.popup(f"Couldn't remove application ({ex})") + sg.popup(f"Couldn't remove application ({ex})", self.window) elif event.startswith("_unique_configBtn"): self.window['-COL1-'].update(visible=False) @@ -713,7 +745,7 @@ def run(self) -> None: elif event == "recoveryMode": if recoveryMode(self.bl): - sg.Popup(f'Device successfully put into USB recovery mode.') + Popup(f'Device successfully put into USB recovery mode.', self.window) # Device will reboot, close previous and reset GUI self.closeDevice() self.resetGui() @@ -872,6 +904,12 @@ def resetGui(self): self.window.Element('commit').update("-version-") self.window.Element('devState').update("-state-") + # Move back to 'About' page + self.window['-COL1-'].update(visible=True) + self.window['-COL2-'].update(visible=False) + self.window['-COL3-'].update(visible=False) + self.window['-COL4-'].update(visible=False) + def closeDevice(self): if self.bl is not None: self.bl.close() @@ -885,7 +923,7 @@ def 
getDevices(self): deviceInfos = dai.XLinkConnection.getAllConnectedDevices() if not deviceInfos: self.window.Element('devices').update("No devices") - sg.Popup("No devices found.") + # sg.Popup("No devices found.") else: for deviceInfo in deviceInfos: deviceTxt = deviceInfo.getMxId() @@ -896,7 +934,7 @@ def getDevices(self): self.window.Element('devices').update("Select device", values=listedDevices) except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', window=self.window) def flashConfig(self): values = self.values @@ -912,12 +950,12 @@ def flashConfig(self): try: if self.isPoE: if self.values['staticBut']: - if check_ip(values['ip']) and check_ip(values['mask']) and check_ip(values['gateway'], req=False): + if check_ip(self.window, values['ip']) and check_ip(self.window, values['mask']) and check_ip(self.window, values['gateway'], req=False): conf.setStaticIPv4(values['ip'], values['mask'], values['gateway']) else: raise Exception('IP or Mask missing using static IP configuration') else: - if check_ip(values['ip'], req=False) and check_ip(values['mask'], req=False) and check_ip(values['gateway'], req=False): + if check_ip(self.window, values['ip'], req=False) and check_ip(self.window, values['mask'], req=False) and check_ip(self.window, values['gateway'], req=False): conf.setDynamicIPv4(values['ip'], values['mask'], values['gateway']) conf.setDnsIPv4(values['dns'], values['dnsAlt']) @@ -925,9 +963,9 @@ def flashConfig(self): if int(values['networkTimeout']) >= 0: conf.setNetworkTimeout(timedelta(seconds=int(values['networkTimeout']) / 1000)) else: - sg.Popup("Values can not be negative!") + Popup("Values can not be negative!", window=self.window) if values['mac'] != "": - if check_mac(values['mac']): + if check_mac(self.window, values['mac']): conf.setMacAddress(values['mac']) else: conf.setMacAddress('00:00:00:00:00:00') @@ -936,29 +974,34 @@ def flashConfig(self): if int(values['usbTimeout']) >= 0: 
conf.setUsbTimeout(timedelta(seconds=int(values['usbTimeout']) / 1000)) else: - sg.Popup("Values can not be negative!") + Popup("Values can not be negative!", window=self.window) if values['usbSpeed'] != "": conf.setUsbMaxSpeed(getattr(dai.UsbSpeed, values['usbSpeed'])) success, error = self.bl.flashConfig(conf) if not success: - sg.Popup(f"Flashing failed: {error}") + Popup(f"Flashing failed: {error}", window=self.window) + return False else: - sg.Popup("Flashing successful.") + Popup("Flashing successful.", window=self.window) + return True + except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', window=self.window) + + return None def clearConfig(self): try: success, error = self.bl.flashConfigClear() if not success: - sg.Popup(f"Clearing configuration failed: {error}") + Popup(f"Clearing configuration failed: {error}", window=self.window) else: - sg.Popup("Successfully cleared configuration.") + Popup("Successfully cleared configuration.", window=self.window) except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', window=self.window) app = DeviceManager() From cc6ea3e147408c275b477494bfce44749b010572 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Tue, 2 May 2023 05:41:47 +0300 Subject: [PATCH 28/66] cam_test.py: fix KeyError due to socket name changes, add -cams cama/camb/camc options --- utilities/cam_test.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 6d04596e7..70ff2f0ce 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -48,7 +48,7 @@ def socket_type_pair(arg): socket, type = arg.split(',') - if not (socket in ['rgb', 'left', 'right', 'camd']): + if not (socket in ['rgb', 'left', 'right', 'cama', 'camb', 'camc', 'camd']): raise ValueError("") if not (type in ['m', 'mono', 'c', 'color']): raise ValueError("") @@ -112,20 +112,19 @@ def socket_type_pair(arg): 'rgb' : dai.CameraBoardSocket.CAM_A, 'left' : 
dai.CameraBoardSocket.CAM_B, 'right': dai.CameraBoardSocket.CAM_C, + 'cama' : dai.CameraBoardSocket.CAM_A, + 'camb' : dai.CameraBoardSocket.CAM_B, + 'camc' : dai.CameraBoardSocket.CAM_C, 'camd' : dai.CameraBoardSocket.CAM_D, } -cam_socket_to_name = { - 'RGB': 'rgb', - 'LEFT': 'left', - 'RIGHT': 'right', - 'CAM_D': 'camd', -} - rotate = { 'rgb': args.rotate in ['all', 'rgb'], 'left': args.rotate in ['all', 'mono'], 'right': args.rotate in ['all', 'mono'], + 'cama': args.rotate in ['all', 'rgb'], + 'camb': args.rotate in ['all', 'mono'], + 'camc': args.rotate in ['all', 'mono'], 'camd': args.rotate in ['all', 'rgb'], } @@ -231,7 +230,7 @@ def exit_cleanly(signum, frame): f' -socket {p.socket.name:6}: {p.sensorName:6} {p.width:4} x {p.height:4} focus:', end='') print('auto ' if p.hasAutofocus else 'fixed', '- ', end='') print(*[type.name for type in p.supportedTypes]) - cam_name[cam_socket_to_name[p.socket.name]] = p.sensorName + cam_name[p.socket.name] = p.sensorName print('USB speed:', device.getUsbSpeed().name) @@ -311,7 +310,7 @@ def exit_cleanly(signum, frame): frame = pkt.getCvFrame() if c in capture_list: width, height = pkt.getWidth(), pkt.getHeight() - capture_file_name = ('capture_' + c + '_' + cam_name[c] + capture_file_name = ('capture_' + c + '_' + cam_name[cam_socket_opts[c].name] + '_' + str(width) + 'x' + str(height) + '_exp_' + str(int( From 7533b5a9bfc5b8648612cd5a1562a9295a6e9020 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 2 May 2023 23:36:37 +0200 Subject: [PATCH 29/66] Updated XLink with 255.255.255.255 discovery added --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index cb80458e7..d7ee0f7bb 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit cb80458e799923f4ac749ef34c7f69326a32f02a +Subproject commit d7ee0f7bba653e9c9857ca482f656ce4d67bdf59 From e3b975bdf3f1984e148c6f66abdd2b918113be14 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 2 May 2023 
23:37:07 +0200 Subject: [PATCH 30/66] Applied formatting --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index d7ee0f7bb..c8187b554 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit d7ee0f7bba653e9c9857ca482f656ce4d67bdf59 +Subproject commit c8187b5548d3862b45c4811d40e0e73e92898179 From 1ca3ab68eb2e152afafc34bc622f9e67f7ee8315 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Wed, 3 May 2023 00:31:49 +0200 Subject: [PATCH 31/66] [Stereo] Fix auto distortion correction for 400p --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index c8187b554..8ed7b6a43 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit c8187b5548d3862b45c4811d40e0e73e92898179 +Subproject commit 8ed7b6a4352ebe5ea65f2861ce9e09a3f3574851 From c83551c057576c53439e65928d15cebd31e59cab Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Wed, 3 May 2023 01:33:41 +0200 Subject: [PATCH 32/66] [Stereo] Fix temporal filter crash on startup --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 8ed7b6a43..9096d62e3 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 8ed7b6a4352ebe5ea65f2861ce9e09a3f3574851 +Subproject commit 9096d62e3598afbc005f20d7d9f0420470da7234 From f26d0ca43b574dec9d5c05c97cafbff0e721d2c9 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Fri, 5 May 2023 13:35:46 +0200 Subject: [PATCH 33/66] Add missing info log level --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 9096d62e3..41681497a 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 9096d62e3598afbc005f20d7d9f0420470da7234 +Subproject commit 41681497a365de42562eceea35528cb4e25689df From 26c1b3bd85c0330d5668c1f83978333bd48488eb Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sat, 
6 May 2023 23:00:36 +0300 Subject: [PATCH 34/66] core/Logging: fixed `DEPTHAI_DEBUG=1` causing a crash at init --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 41681497a..1d990f993 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 41681497a365de42562eceea35528cb4e25689df +Subproject commit 1d990f993764204220ca799c04f78fa0cad9b570 From 4d3192f6ddd6626f16718f5659feb6324cf0e7a3 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 7 May 2023 14:02:24 +0300 Subject: [PATCH 35/66] Add BoardConfig bindings for USB product/vendor strings and UVC config --- src/DeviceBindings.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 868322e43..18f138f39 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -343,6 +343,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ boardConfigGpioPull(boardConfigGpio, "Pull", DOC(dai, BoardConfig, GPIO, Pull)); py::enum_ boardConfigGpioDrive(boardConfigGpio, "Drive", DOC(dai, BoardConfig, GPIO, Drive)); py::class_ boardConfigUart(boardConfig, "UART", DOC(dai, BoardConfig, UART)); + py::class_ boardConfigUvc(boardConfig, "UVC", DOC(dai, BoardConfig, UVC)); struct PyClock{}; py::class_ clock(m, "Clock"); @@ -379,6 +380,8 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("flashBootedVid", &BoardConfig::USB::flashBootedVid) .def_readwrite("flashBootedPid", &BoardConfig::USB::flashBootedPid) .def_readwrite("maxSpeed", &BoardConfig::USB::maxSpeed) + .def_readwrite("productName", &BoardConfig::USB::productName) + .def_readwrite("manufacturer", &BoardConfig::USB::manufacturer) ; // Bind BoardConfig::Network @@ -456,6 +459,17 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("tmp", &BoardConfig::UART::tmp) ; + // Bind BoardConfig::UVC + boardConfigUvc + .def(py::init<>()) + 
.def(py::init()) + .def_readwrite("cameraName", &BoardConfig::UVC::cameraName) + .def_readwrite("width", &BoardConfig::UVC::width) + .def_readwrite("height", &BoardConfig::UVC::height) + .def_readwrite("frameType", &BoardConfig::UVC::frameType) + .def_readwrite("enable", &BoardConfig::UVC::enable) + ; + // Bind BoardConfig boardConfig .def(py::init<>()) @@ -474,6 +488,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("logSizeMax", &BoardConfig::logSizeMax, DOC(dai, BoardConfig, logSizeMax)) .def_readwrite("logVerbosity", &BoardConfig::logVerbosity, DOC(dai, BoardConfig, logVerbosity)) .def_readwrite("logDevicePrints", &BoardConfig::logDevicePrints, DOC(dai, BoardConfig, logDevicePrints)) + .def_readwrite("uvc", &BoardConfig::uvc, DOC(dai, BoardConfig, uvc)) ; // Bind Device::Config From 9920badc11adb303c5b9ea9efb1aa77798b75e8b Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 7 May 2023 14:04:17 +0300 Subject: [PATCH 36/66] Move rgb_uvc.py to examples/ColorCamera --- examples/{ => ColorCamera}/rgb_uvc.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{ => ColorCamera}/rgb_uvc.py (100%) diff --git a/examples/rgb_uvc.py b/examples/ColorCamera/rgb_uvc.py similarity index 100% rename from examples/rgb_uvc.py rename to examples/ColorCamera/rgb_uvc.py From 145b877a8298cad332d3d3c72e3b8ccb14af7875 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 7 May 2023 14:56:45 +0300 Subject: [PATCH 37/66] rgb_uvc.py: add UVC boardConfig, `--load-and-exit` script option. 
Flashing is possible as well on certain devices --- examples/ColorCamera/rgb_uvc.py | 50 +++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/examples/ColorCamera/rgb_uvc.py b/examples/ColorCamera/rgb_uvc.py index 00f7d76aa..42e510c95 100755 --- a/examples/ColorCamera/rgb_uvc.py +++ b/examples/ColorCamera/rgb_uvc.py @@ -4,20 +4,20 @@ import time import argparse -enable_4k = True # Will downscale 4K -> 1080p - parser = argparse.ArgumentParser() parser.add_argument('-fb', '--flash-bootloader', default=False, action="store_true") parser.add_argument('-f', '--flash-app', default=False, action="store_true") +parser.add_argument('-l', '--load-and-exit', default=False, action="store_true") args = parser.parse_args() def getPipeline(): - # Start defining a pipeline + enable_4k = True # Will downscale 4K -> 1080p + pipeline = dai.Pipeline() # Define a source - color camera cam_rgb = pipeline.createColorCamera() - cam_rgb.setBoardSocket(dai.CameraBoardSocket.RGB) + cam_rgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam_rgb.setInterleaved(False) #cam_rgb.initialControl.setManualFocus(130) @@ -27,25 +27,29 @@ def getPipeline(): else: cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) - # Create an UVC (USB Video Class) output node. 
It needs 1920x1080, NV12 input + # Create an UVC (USB Video Class) output node uvc = pipeline.createUVC() cam_rgb.video.link(uvc.input) - return pipeline + # Note: if the pipeline is sent later to device (using startPipeline()), + # it is important to pass the device config separately when creating the device + config = dai.Device.Config() + # config.board.uvc = dai.BoardConfig.UVC() # enable default 1920x1080 NV12 + config.board.uvc = dai.BoardConfig.UVC(1920, 1080) + config.board.uvc.frameType = dai.ImgFrame.Type.NV12 + # config.board.uvc.cameraName = "My Custom Cam" + pipeline.setBoardConfig(config.board) -# Workaround for a bug with the timeout-enabled bootloader -progressCalled = False -# TODO move this under flash(), will need to handle `progressCalled` differently -def progress(p): - global progressCalled - progressCalled = True - print(f'Flashing progress: {p*100:.1f}%') + return pipeline # Will flash the bootloader if no pipeline is provided as argument def flash(pipeline=None): (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() bootloader = dai.DeviceBootloader(bl, True) + # Create a progress callback lambda + progress = lambda p : print(f'Flashing progress: {p*100:.1f}%') + startTime = time.monotonic() if pipeline is None: print("Flashing bootloader...") @@ -54,8 +58,6 @@ def flash(pipeline=None): print("Flashing application pipeline...") bootloader.flash(progress, pipeline) - if not progressCalled: - raise RuntimeError('Flashing failed, please try again') elapsedTime = round(time.monotonic() - startTime, 2) print("Done in", elapsedTime, "seconds") @@ -65,11 +67,23 @@ def flash(pipeline=None): print("Flashing successful. 
Please power-cycle the device") quit() -# Pipeline defined, now the device is connected to +if args.load_and_exit: + import os + # Disabling device watchdog, so it doesn't need the host to ping periodically + os.environ["DEPTHAI_WATCHDOG"] = "0" + device = dai.Device(getPipeline()) + print("\nDevice started, open a UVC viewer to check the camera stream.") + print("Attempting to force-quit this process...") + print("To reconnect with depthai, a device power-cycle may be required") + # We do not want the device to be closed, so kill the process. + # (TODO add depthai API to be able to cleanly exit without closing device) + import signal + os.kill(os.getpid(),signal.SIGKILL) + +# Standard UVC load with depthai with dai.Device(getPipeline()) as device: print("\nDevice started, please keep this process running") - print("and open an UVC viewer. Example on Linux:") - print(" guvcview -d /dev/video0") + print("and open an UVC viewer to check the camera stream.") print("\nTo close: Ctrl+C") # Doing nothing here, just keeping the host feeding the watchdog From 33907177d3c773e4e84d29f738a9e2c34ba11aa7 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Wed, 10 May 2023 18:03:16 +0200 Subject: [PATCH 38/66] CrashDump: add optional clear of crash dump, enabled by default --- depthai-core | 2 +- src/DeviceBindings.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/depthai-core b/depthai-core index 1d990f993..bdbbf828d 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 1d990f993764204220ca799c04f78fa0cad9b570 +Subproject commit bdbbf828da5c392c8e028a83715257b30c91b18f diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 868322e43..443034438 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -593,7 +593,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("getLogLevel", [](DeviceBase& d) { py::gil_scoped_release release; return d.getLogLevel(); }, DOC(dai, DeviceBase, 
getLogLevel)) .def("setSystemInformationLoggingRate", [](DeviceBase& d, float hz) { py::gil_scoped_release release; d.setSystemInformationLoggingRate(hz); }, py::arg("rateHz"), DOC(dai, DeviceBase, setSystemInformationLoggingRate)) .def("getSystemInformationLoggingRate", [](DeviceBase& d) { py::gil_scoped_release release; return d.getSystemInformationLoggingRate(); }, DOC(dai, DeviceBase, getSystemInformationLoggingRate)) - .def("getCrashDump", [](DeviceBase& d) { py::gil_scoped_release release; return d.getCrashDump(); }, DOC(dai, DeviceBase, getCrashDump)) + .def("getCrashDump", [](DeviceBase& d, bool clearCrashDump) { py::gil_scoped_release release; return d.getCrashDump(clearCrashDump); }, py::arg("clearCrashDump") = true, DOC(dai, DeviceBase, getCrashDump)) .def("hasCrashDump", [](DeviceBase& d) { py::gil_scoped_release release; return d.hasCrashDump(); }, DOC(dai, DeviceBase, hasCrashDump)) .def("getConnectedCameras", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedCameras(); }, DOC(dai, DeviceBase, getConnectedCameras)) .def("getConnectedCameraFeatures", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedCameraFeatures(); }, DOC(dai, DeviceBase, getConnectedCameraFeatures)) From c4b58d66d2d8d325c416594948ac55b09590f849 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Wed, 11 Jan 2023 05:26:04 +0200 Subject: [PATCH 39/66] cam_test: add `-raw` for streaming/saving raw frames (+ISP YUV) --- utilities/cam_test.py | 76 +++++++++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 20 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 491ae52cf..3ae24ae22 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -36,6 +36,7 @@ #os.environ["DEPTHAI_LEVEL"] = "debug" import cv2 +import numpy as np import argparse import collections import time @@ -80,6 +81,8 @@ def socket_type_pair(arg): help="Path to custom camera tuning database") parser.add_argument('-d', '--device', 
default="", type=str, help="Optional MX ID of the device to connect to.") +parser.add_argument('-raw', '--enable-raw', default=False, action="store_true", + help='Enable the RAW camera streams') parser.add_argument('-ctimeout', '--connection-timeout', default=30000, help="Connection timeout in ms. Default: %(default)s (sets DEPTHAI_CONNECTION_TIMEOUT environment variable)") @@ -178,9 +181,12 @@ def get(self): cam = {} xout = {} +xout_raw = {} +streams = [] for c in cam_list: xout[c] = pipeline.createXLinkOut() xout[c].setStreamName(c) + streams.append(c) if cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) @@ -206,6 +212,15 @@ def get(self): cam[c].setFps(args.fps) cam[c].setIsp3aFps(args.isp3afps) + if args.enable_raw: + raw_name = 'raw_' + c + xout_raw[c] = pipeline.create(dai.node.XLinkOut) + xout_raw[c].setStreamName(raw_name) + if args.enable_raw: + streams.append(raw_name) + cam[c].raw.link(xout_raw[c].input) + # to be added cam[c].setRawOutputPacked(False) + if args.camera_tuning: pipeline.setCameraTuningBlobPath(str(args.camera_tuning)) @@ -232,6 +247,8 @@ def exit_cleanly(signum, frame): print('auto ' if p.hasAutofocus else 'fixed', '- ', end='') print(*[type.name for type in p.supportedTypes]) cam_name[p.socket.name] = p.sensorName + if args.enable_raw: + cam_name['raw_'+p.socket.name] = p.sensorName print('USB speed:', device.getUsbSpeed().name) @@ -240,7 +257,7 @@ def exit_cleanly(signum, frame): q = {} fps_host = {} # FPS computed based on the time we receive frames in app fps_capt = {} # FPS computed based on capture timestamps from device - for c in cam_list: + for c in streams: q[c] = device.getOutputQueue(name=c, maxSize=4, blocking=False) # The OpenCV window resize may produce some artifacts if args.resizable_windows: @@ -299,7 +316,7 @@ def exit_cleanly(signum, frame): capture_list = [] while True: - for c in cam_list: + for c in streams: try: pkt = q[c].tryGet() except Exception 
as e: @@ -308,25 +325,44 @@ def exit_cleanly(signum, frame): if pkt is not None: fps_host[c].update() fps_capt[c].update(pkt.getTimestamp().total_seconds()) + width, height = pkt.getWidth(), pkt.getHeight() frame = pkt.getCvFrame() - if c in capture_list: - width, height = pkt.getWidth(), pkt.getHeight() - capture_file_name = ('capture_' + c + '_' + cam_name[cam_socket_opts[c].name] - + '_' + str(width) + 'x' + str(height) - + '_exp_' + - str(int( - pkt.getExposureTime().total_seconds()*1e6)) - + '_iso_' + str(pkt.getSensitivity()) - + '_lens_' + - str(pkt.getLensPosition()) - + '_' + capture_time - + '_' + str(pkt.getSequenceNum()) - + ".png" - ) - print("\nSaving:", capture_file_name) - cv2.imwrite(capture_file_name, frame) + capture = c in capture_list + if capture: + capture_file_info = ('capture_' + c + '_' + cam_name[c] + + '_' + str(width) + 'x' + str(height) + + '_exp_' + str(int(pkt.getExposureTime().total_seconds()*1e6)) + + '_iso_' + str(pkt.getSensitivity()) + + '_lens_' + str(pkt.getLensPosition()) + + '_' + capture_time + + '_' + str(pkt.getSequenceNum()) + ) capture_list.remove(c) - + print() + if c.startswith('raw_'): + if capture: + filename = capture_file_info + '_10bit.bw' + print('Saving:', filename) + frame.tofile(filename) + # Full range for display, use bits [15:6] of the 16-bit pixels + frame = frame * (1 << 6) + # Debayer color for preview/png + if cam_type_color[c.split('_')[-1]]: + # See this for the ordering, at the end of page: + # https://docs.opencv.org/4.5.1/de/d25/imgproc_color_conversions.html + # TODO add bayer order to ImgFrame getType() + frame = cv2.cvtColor(frame, cv2.COLOR_BayerGB2BGR) + else: + # Save YUV too, but only when RAW is also enabled (for tuning purposes) + if capture and args.enable_raw: + payload = pkt.getData() + filename = capture_file_info + '_P420.yuv' + print('Saving:', filename) + payload.tofile(filename) + if capture: + filename = capture_file_info + '.png' + print('Saving:', filename) + 
cv2.imwrite(filename, frame) cv2.imshow(c, frame) print("\rFPS:", *["{:6.2f}|{:6.2f}".format(fps_host[c].get(), @@ -337,7 +373,7 @@ def exit_cleanly(signum, frame): if key == ord('q'): break elif key == ord('c'): - capture_list = cam_list.copy() + capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') elif key == ord('t'): print("Autofocus trigger (and disable continuous)") From ae60f11d3b5c2ded5e2c7350af36fc4e564a321b Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 15 Jan 2023 14:43:58 +0200 Subject: [PATCH 40/66] cam_test.py: `/` to toggle printing camera settings: exp/ISO/lens/colortemp --- utilities/cam_test.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 3ae24ae22..ef0fc2cac 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -30,6 +30,8 @@ For the 'Select control: ...' options, use these keys to modify the value: '-' or '_' to decrease '+' or '=' to increase + +'/' to toggle printing camera settings: exposure, ISO, lens position, color temperature """ import os @@ -310,6 +312,7 @@ def exit_cleanly(signum, frame): luma_denoise = 0 chroma_denoise = 0 control = 'none' + show = False print("Cam:", *[' ' + c.ljust(8) for c in cam_list], "[host | capture timestamp]") @@ -327,6 +330,13 @@ def exit_cleanly(signum, frame): fps_capt[c].update(pkt.getTimestamp().total_seconds()) width, height = pkt.getWidth(), pkt.getHeight() frame = pkt.getCvFrame() + if show: + txt = f"[{c:5}, {pkt.getSequenceNum():4}] " + txt += f"Exp: {pkt.getExposureTime().total_seconds()*1000:6.3f} ms, " + txt += f"ISO: {pkt.getSensitivity():4}, " + txt += f"Lens pos: {pkt.getLensPosition():3}, " + txt += f"Color temp: {pkt.getColorTemperature()} K" + print(txt) capture = c in capture_list if capture: capture_file_info = ('capture_' + c + '_' + cam_name[c] @@ -365,13 +375,17 @@ def exit_cleanly(signum, frame): cv2.imwrite(filename, frame) cv2.imshow(c, frame) 
print("\rFPS:", - *["{:6.2f}|{:6.2f}".format(fps_host[c].get(), - fps_capt[c].get()) for c in cam_list], - end='', flush=True) + *["{:6.2f}|{:6.2f}".format(fps_host[c].get(), fps_capt[c].get()) for c in cam_list], + end=' ', flush=True) + if show: print() key = cv2.waitKey(1) if key == ord('q'): break + elif key == ord('/'): + show = not show + # Print empty string as FPS status new-line separator + print("" if show else "Printing camera settings: OFF") elif key == ord('c'): capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') From 5493fb643524460c55a7f149b8f8758492644e61 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 17 Mar 2023 05:36:41 +0200 Subject: [PATCH 41/66] Color/Mono/Camera: add `setRawOutputPacked` config -- `core` updated in upcoming commit --- src/pipeline/node/CameraBindings.cpp | 1 + src/pipeline/node/ColorCameraBindings.cpp | 2 ++ src/pipeline/node/MonoCameraBindings.cpp | 2 ++ 3 files changed, 5 insertions(+) diff --git a/src/pipeline/node/CameraBindings.cpp b/src/pipeline/node/CameraBindings.cpp index d99d0eab0..2c52bb282 100644 --- a/src/pipeline/node/CameraBindings.cpp +++ b/src/pipeline/node/CameraBindings.cpp @@ -159,6 +159,7 @@ void bind_camera(pybind11::module& m, void* pCallstack){ .def("setCalibrationAlpha", &Camera::setCalibrationAlpha, py::arg("alpha"), DOC(dai, node, Camera, setCalibrationAlpha)) .def("getCalibrationAlpha", &Camera::getCalibrationAlpha, DOC(dai, node, Camera, getCalibrationAlpha)) + .def("setRawOutputPacked", &Camera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, Camera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("Camera").attr("Properties") = cameraProperties; diff --git a/src/pipeline/node/ColorCameraBindings.cpp b/src/pipeline/node/ColorCameraBindings.cpp index 0816ffefd..fb345d221 100644 --- a/src/pipeline/node/ColorCameraBindings.cpp +++ b/src/pipeline/node/ColorCameraBindings.cpp @@ -188,6 +188,8 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ 
.def("getIspNumFramesPool", &ColorCamera::getIspNumFramesPool, DOC(dai, node, ColorCamera, getIspNumFramesPool)) .def("setCamera", &ColorCamera::setCamera, py::arg("name"), DOC(dai, node, ColorCamera, setCamera)) .def("getCamera", &ColorCamera::getCamera, DOC(dai, node, ColorCamera, getCamera)) + + .def("setRawOutputPacked", &ColorCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, ColorCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("ColorCamera").attr("Properties") = colorCameraProperties; diff --git a/src/pipeline/node/MonoCameraBindings.cpp b/src/pipeline/node/MonoCameraBindings.cpp index 41c3b981f..51365fda0 100644 --- a/src/pipeline/node/MonoCameraBindings.cpp +++ b/src/pipeline/node/MonoCameraBindings.cpp @@ -92,6 +92,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getRawNumFramesPool", &MonoCamera::getRawNumFramesPool, DOC(dai, node, MonoCamera, getRawNumFramesPool)) .def("setCamera", &MonoCamera::setCamera, py::arg("name"), DOC(dai, node, MonoCamera, setCamera)) .def("getCamera", &MonoCamera::getCamera, DOC(dai, node, MonoCamera, getCamera)) + + .def("setRawOutputPacked", &MonoCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, MonoCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("MonoCamera").attr("Properties") = monoCameraProperties; From c3ddc3995a9753c1c96bea60ce87198ef82a441f Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 17 Mar 2023 05:41:11 +0200 Subject: [PATCH 42/66] cam_test.py: `-raw` streaming unpacked --- utilities/cam_test.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index ef0fc2cac..5e9ceb5eb 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -212,7 +212,8 @@ def get(self): if rotate[c]: cam[c].setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) cam[c].setFps(args.fps) - cam[c].setIsp3aFps(args.isp3afps) + if args.isp3afps: + cam[c].setIsp3aFps(args.isp3afps) if 
args.enable_raw: raw_name = 'raw_' + c @@ -221,7 +222,7 @@ def get(self): if args.enable_raw: streams.append(raw_name) cam[c].raw.link(xout_raw[c].input) - # to be added cam[c].setRawOutputPacked(False) + cam[c].setRawOutputPacked(False) if args.camera_tuning: pipeline.setCameraTuningBlobPath(str(args.camera_tuning)) @@ -355,7 +356,11 @@ def exit_cleanly(signum, frame): print('Saving:', filename) frame.tofile(filename) # Full range for display, use bits [15:6] of the 16-bit pixels - frame = frame * (1 << 6) + type = pkt.getType() + multiplier = 1 + if type == dai.ImgFrame.Type.RAW10: multiplier = (1 << (16-10)) + if type == dai.ImgFrame.Type.RAW12: multiplier = (1 << (16-4)) + frame = frame * multiplier # Debayer color for preview/png if cam_type_color[c.split('_')[-1]]: # See this for the ordering, at the end of page: From bddb42e52e4ea54c00bec73acaf448ca7d236978 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Sun, 9 Apr 2023 23:05:54 +0300 Subject: [PATCH 43/66] cam_test.py: some fixes for ToF: `-cams rgb,t` and other options --- utilities/cam_test.py | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 5e9ceb5eb..9730e0d54 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -182,14 +182,24 @@ def get(self): control.setStreamName('control') cam = {} +tof = {} xout = {} xout_raw = {} streams = [] for c in cam_list: + tofEnableRaw = False xout[c] = pipeline.createXLinkOut() xout[c].setStreamName(c) streams.append(c) - if cam_type_color[c]: + if cam_type_tof[c]: + cam[c] = pipeline.create(dai.node.ColorCamera) # .Camera + if args.tof_raw: + tofEnableRaw = True + else: + tof[c] = pipeline.create(dai.node.ToF) + cam[c].raw.link(tof[c].input) + tof[c].depth.link(xout[c].input) + elif cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) cam[c].setIspScale(1, args.isp_downscale) @@ -215,12 +225,11 @@ 
def get(self): if args.isp3afps: cam[c].setIsp3aFps(args.isp3afps) - if args.enable_raw: + if args.enable_raw or tofEnableRaw: raw_name = 'raw_' + c xout_raw[c] = pipeline.create(dai.node.XLinkOut) xout_raw[c].setStreamName(raw_name) - if args.enable_raw: - streams.append(raw_name) + streams.append(raw_name) cam[c].raw.link(xout_raw[c].input) cam[c].setRawOutputPacked(False) @@ -331,6 +340,14 @@ def exit_cleanly(signum, frame): fps_capt[c].update(pkt.getTimestamp().total_seconds()) width, height = pkt.getWidth(), pkt.getHeight() frame = pkt.getCvFrame() + if cam_type_tof[c.split('_')[-1]] and not c.startswith('raw_'): + if args.tof_cm: + # pixels represent `cm`, capped to 255. Value can be checked hovering the mouse + frame = (frame // 10).clip(0, 255).astype(np.uint8) + else: + frame = (frame.view(np.int16).astype(float)) + frame = cv2.normalize(frame, frame, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) + frame = cv2.applyColorMap(frame, jet_custom) if show: txt = f"[{c:5}, {pkt.getSequenceNum():4}] " txt += f"Exp: {pkt.getExposureTime().total_seconds()*1000:6.3f} ms, " @@ -361,7 +378,7 @@ def exit_cleanly(signum, frame): if type == dai.ImgFrame.Type.RAW10: multiplier = (1 << (16-10)) if type == dai.ImgFrame.Type.RAW12: multiplier = (1 << (16-4)) frame = frame * multiplier - # Debayer color for preview/png + # Debayer as color for preview/png if cam_type_color[c.split('_')[-1]]: # See this for the ordering, at the end of page: # https://docs.opencv.org/4.5.1/de/d25/imgproc_color_conversions.html From 32831ae99bc05cadd8bb07717a1f490a20a94119 Mon Sep 17 00:00:00 2001 From: Florin Buica Date: Fri, 21 Apr 2023 14:51:10 +0300 Subject: [PATCH 44/66] adding bindings for ToF --- CMakeLists.txt | 3 + depthai-core | 2 +- src/pipeline/datatype/ToFConfigBindings.cpp | 71 +++++++++++++++++++++ src/pipeline/node/ToFBindings.cpp | 47 ++++++++++++++ utilities/cam_test.py | 12 +++- 5 files changed, 132 insertions(+), 3 deletions(-) create mode 100644 
src/pipeline/datatype/ToFConfigBindings.cpp create mode 100644 src/pipeline/node/ToFBindings.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 2183df54f..35c956c01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -124,9 +124,11 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/node/IMUBindings.cpp src/pipeline/node/EdgeDetectorBindings.cpp src/pipeline/node/FeatureTrackerBindings.cpp + src/pipeline/node/ToFBindings.cpp src/pipeline/node/AprilTagBindings.cpp src/pipeline/node/DetectionParserBindings.cpp src/pipeline/node/WarpBindings.cpp + src/pipeline/node/ToFBindings.cpp src/pipeline/datatype/ADatatypeBindings.cpp src/pipeline/datatype/AprilTagConfigBindings.cpp @@ -135,6 +137,7 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/datatype/CameraControlBindings.cpp src/pipeline/datatype/EdgeDetectorConfigBindings.cpp src/pipeline/datatype/FeatureTrackerConfigBindings.cpp + src/pipeline/datatype/ToFConfigBindings.cpp src/pipeline/datatype/ImageManipConfigBindings.cpp src/pipeline/datatype/ImgDetectionsBindings.cpp src/pipeline/datatype/ImgFrameBindings.cpp diff --git a/depthai-core b/depthai-core index bdbbf828d..e58b85be2 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit bdbbf828da5c392c8e028a83715257b30c91b18f +Subproject commit e58b85be2b5acc6fffee4cd4862fcca070467ef8 diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp new file mode 100644 index 000000000..8c4c8dab3 --- /dev/null +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -0,0 +1,71 @@ +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" +#include +#include + +// depthai +#include "depthai/pipeline/datatype/ToFConfig.hpp" + +//pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_tofconfig(pybind11::module& m, void* pCallstack){ + + using namespace dai; + + py::class_> rawToFConfig(m, "RawToFConfig", DOC(dai, RawToFConfig)); + py::class_ depthParams(rawToFConfig, "DepthParams", 
DOC(dai, RawToFConfig, DepthParams)); + py::enum_ depthParamsTypeFMod(depthParams, "TypeFMod", DOC(dai, RawToFConfig, DepthParams, TypeFMod)); + py::class_> toFConfig(m, "ToFConfig", DOC(dai, ToFConfig)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Metadata / raw + rawToFConfig + .def(py::init<>()) + .def_readwrite("depthParams", &RawToFConfig::depthParams, DOC(dai, RawToFConfig, depthParams)) + ; + + depthParamsTypeFMod + .value("ALL", RawToFConfig::DepthParams::TypeFMod::F_MOD_ALL) + .value("MIN", RawToFConfig::DepthParams::TypeFMod::F_MOD_MIN) + .value("MAX", RawToFConfig::DepthParams::TypeFMod::F_MOD_MAX) + ; + + depthParams + .def(py::init<>()) + .def_readwrite("enable", &RawToFConfig::DepthParams::enable, DOC(dai, RawToFConfig, DepthParams, enable)) + .def_readwrite("freqModUsed", &RawToFConfig::DepthParams::freqModUsed, DOC(dai, RawToFConfig, DepthParams, freqModUsed)) + .def_readwrite("avgPhaseShuffle", &RawToFConfig::DepthParams::avgPhaseShuffle, DOC(dai, RawToFConfig, DepthParams, avgPhaseShuffle)) + .def_readwrite("minimumAmplitude", &RawToFConfig::DepthParams::minimumAmplitude, DOC(dai, RawToFConfig, DepthParams, minimumAmplitude)) + ; + + // Message + toFConfig + .def(py::init<>()) + .def(py::init>()) + + .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams, 2)) + + 
.def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) + .def("get", &ToFConfig::get, DOC(dai, ToFConfig, get)) + ; + + // add aliases + m.attr("ToFConfig").attr("DepthParams") = m.attr("RawToFConfig").attr("DepthParams"); + +} diff --git a/src/pipeline/node/ToFBindings.cpp b/src/pipeline/node/ToFBindings.cpp new file mode 100644 index 000000000..9bde78539 --- /dev/null +++ b/src/pipeline/node/ToFBindings.cpp @@ -0,0 +1,47 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/ToF.hpp" + +void bind_tof(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ tofProperties(m, "ToFProperties", DOC(dai, ToFProperties)); + auto tof = ADD_NODE(ToF); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + tofProperties + .def_readwrite("initialConfig", &ToFProperties::initialConfig) + ; + + // Node + tof + .def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input)) + .def_readonly("inputConfig", &ToF::inputConfig, DOC(dai, node, ToF, inputConfig), DOC(dai, node, ToF, inputConfig)) + .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth)) + .def_readonly("amplitude", 
&ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude)) + .def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, error)) + .def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig), DOC(dai, node, ToF, initialConfig)) + ; + // ALIAS + daiNodeModule.attr("ToF").attr("Properties") = tofProperties; + +} diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 9730e0d54..ddca1828a 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -45,7 +45,7 @@ from itertools import cycle from pathlib import Path import sys -import cam_test_gui +#import cam_test_gui import signal @@ -181,6 +181,9 @@ def get(self): control = pipeline.createXLinkIn() control.setStreamName('control') +xinTofConfig = pipeline.createXLinkIn() +xinTofConfig.setStreamName('tofConfig') + cam = {} tof = {} xout = {} @@ -199,6 +202,11 @@ def get(self): tof[c] = pipeline.create(dai.node.ToF) cam[c].raw.link(tof[c].input) tof[c].depth.link(xout[c].input) + xinTofConfig.out.link(tof[c].inputConfig) + tofConfig = tof[c].initialConfig.get() + tofConfig.setFreqModUsed(dai.ToFConfig.DepthParams.TypeFMod.F_MOD_MIN) + tofConfig.SetAvgPhaseShuffle(True) + tofConfig.setMinAmplitude(20.0) elif cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) @@ -230,7 +238,7 @@ def get(self): xout_raw[c] = pipeline.create(dai.node.XLinkOut) xout_raw[c].setStreamName(raw_name) streams.append(raw_name) - cam[c].raw.link(xout_raw[c].input) + tof[c].amplitude.link(xout_raw[c].input) cam[c].setRawOutputPacked(False) if args.camera_tuning: From 8c2b0cd6ca984c3e4065d5ec4be07105bdda8772 Mon Sep 17 00:00:00 2001 From: Florin Buica Date: Tue, 25 Apr 2023 10:07:54 +0300 Subject: [PATCH 45/66] preparing to merge to tof vga --- src/DatatypeBindings.cpp | 2 ++ src/pipeline/datatype/ToFConfigBindings.cpp | 5 ++++- src/pipeline/node/NodeBindings.cpp | 2 ++ 
src/pipeline/node/ToFBindings.cpp | 4 ++-- utilities/cam_test.py | 24 +++++++++++++++++---- 5 files changed, 30 insertions(+), 7 deletions(-) diff --git a/src/DatatypeBindings.cpp b/src/DatatypeBindings.cpp index deae33696..5ff683f3d 100644 --- a/src/DatatypeBindings.cpp +++ b/src/DatatypeBindings.cpp @@ -21,6 +21,7 @@ void bind_spatiallocationcalculatordata(pybind11::module& m, void* pCallstack); void bind_stereodepthconfig(pybind11::module& m, void* pCallstack); void bind_systeminformation(pybind11::module& m, void* pCallstack); void bind_trackedfeatures(pybind11::module& m, void* pCallstack); +void bind_tofconfig(pybind11::module& m, void* pCallstack); void bind_tracklets(pybind11::module& m, void* pCallstack); void DatatypeBindings::addToCallstack(std::deque& callstack) { @@ -46,6 +47,7 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_stereodepthconfig); callstack.push_front(bind_systeminformation); callstack.push_front(bind_trackedfeatures); + callstack.push_front(bind_tofconfig); callstack.push_front(bind_tracklets); } diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp index 8c4c8dab3..34716a611 100644 --- a/src/pipeline/datatype/ToFConfigBindings.cpp +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -58,8 +58,11 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ toFConfig .def(py::init<>()) .def(py::init>()) - + .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams, 2)) + .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setDepthParams, 2)) + .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, node, ToFConfig, setAvgPhaseShuffle)) + .def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, node, ToFConfig, setMinAmplitude)) .def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) .def("get", &ToFConfig::get, DOC(dai, 
ToFConfig, get)) diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index ef2bd8c2e..56c089e80 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ -114,6 +114,7 @@ void bind_edgedetector(pybind11::module& m, void* pCallstack); void bind_featuretracker(pybind11::module& m, void* pCallstack); void bind_apriltag(pybind11::module& m, void* pCallstack); void bind_detectionparser(pybind11::module& m, void* pCallstack); +void bind_tof(pybind11::module& m, void* pCallstack); void NodeBindings::addToCallstack(std::deque& callstack) { // Bind Node et al @@ -143,6 +144,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_featuretracker); callstack.push_front(bind_apriltag); callstack.push_front(bind_detectionparser); + callstack.push_front(bind_tof); } void NodeBindings::bind(pybind11::module& m, void* pCallstack){ diff --git a/src/pipeline/node/ToFBindings.cpp b/src/pipeline/node/ToFBindings.cpp index 9bde78539..ae67b149e 100644 --- a/src/pipeline/node/ToFBindings.cpp +++ b/src/pipeline/node/ToFBindings.cpp @@ -29,13 +29,13 @@ void bind_tof(pybind11::module& m, void* pCallstack){ // Properties tofProperties - .def_readwrite("initialConfig", &ToFProperties::initialConfig) + .def_readwrite("initialConfig", &ToFProperties::initialConfig, DOC(dai, ToFProperties, initialConfig)) ; // Node tof - .def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input)) .def_readonly("inputConfig", &ToF::inputConfig, DOC(dai, node, ToF, inputConfig), DOC(dai, node, ToF, inputConfig)) + .def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input)) .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth)) .def_readonly("amplitude", &ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude)) .def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, 
error)) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index ddca1828a..7d0df63e4 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -189,6 +189,7 @@ def get(self): xout = {} xout_raw = {} streams = [] +tofConfig = {} for c in cam_list: tofEnableRaw = False xout[c] = pipeline.createXLinkOut() @@ -204,9 +205,10 @@ def get(self): tof[c].depth.link(xout[c].input) xinTofConfig.out.link(tof[c].inputConfig) tofConfig = tof[c].initialConfig.get() - tofConfig.setFreqModUsed(dai.ToFConfig.DepthParams.TypeFMod.F_MOD_MIN) - tofConfig.SetAvgPhaseShuffle(True) - tofConfig.setMinAmplitude(20.0) + tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN + tofConfig.depthParams.avgPhaseShuffle = False + tofConfig.depthParams.minimumAmplitude = 3.0 + tof[c].initialConfig.set(tofConfig) elif cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) @@ -287,6 +289,7 @@ def exit_cleanly(signum, frame): fps_capt[c] = FPS() controlQueue = device.getInputQueue('control') + tofCfgQueue = device.getInputQueue('tofConfig') # Manual exposure/focus set step EXP_STEP = 500 # us @@ -331,6 +334,7 @@ def exit_cleanly(signum, frame): chroma_denoise = 0 control = 'none' show = False + tof_amp_min = tofConfig.depthParams.minimumAmplitude print("Cam:", *[' ' + c.ljust(8) for c in cam_list], "[host | capture timestamp]") @@ -419,6 +423,11 @@ def exit_cleanly(signum, frame): elif key == ord('c'): capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') + elif key == ord('g'): + f_mod = dai.RawToFConfig.DepthParams.TypeFMod.MAX if tofConfig.depthParams.freqModUsed == dai.RawToFConfig.DepthParams.TypeFMod.MIN else dai.RawToFConfig.DepthParams.TypeFMod.MIN + print("ToF toggling f_mod value to:", f_mod) + tofConfig.depthParams.freqModUsed = f_mod + tofCfgQueue.send(tofConfig) elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() @@ 
-493,7 +502,7 @@ def exit_cleanly(signum, frame): if floodIntensity < 0: floodIntensity = 0 device.setIrFloodLightBrightness(floodIntensity) - elif key >= 0 and chr(key) in '34567890[]': + elif key >= 0 and chr(key) in '34567890[]p': if key == ord('3'): control = 'awb_mode' elif key == ord('4'): @@ -514,6 +523,8 @@ def exit_cleanly(signum, frame): control = 'luma_denoise' elif key == ord(']'): control = 'chroma_denoise' + elif key == ord('p'): + control = 'tof_amplitude_min' print("Selected control:", control) elif key in [ord('-'), ord('_'), ord('+'), ord('=')]: change = 0 @@ -564,4 +575,9 @@ def exit_cleanly(signum, frame): chroma_denoise = clamp(chroma_denoise + change, 0, 4) print("Chroma denoise:", chroma_denoise) ctrl.setChromaDenoise(chroma_denoise) + elif control == 'tof_amplitude_min': + amp_min = clamp(tofConfig.depthParams.minimumAmplitude + change, 0, 50) + print("Setting min amplitude(confidence) to:", amp_min) + tofConfig.depthParams.minimumAmplitude = amp_min + tofCfgQueue.send(tofConfig) controlQueue.send(ctrl) From 2c43c8c526782c5d3a524b78f8ec80876e0597e5 Mon Sep 17 00:00:00 2001 From: Florin Buica Date: Tue, 25 Apr 2023 13:17:54 +0300 Subject: [PATCH 46/66] add configurable phase avg --- utilities/cam_test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 7d0df63e4..4c2644881 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -428,6 +428,10 @@ def exit_cleanly(signum, frame): print("ToF toggling f_mod value to:", f_mod) tofConfig.depthParams.freqModUsed = f_mod tofCfgQueue.send(tofConfig) + elif key == ord('h'): + tofConfig.depthParams.avgPhaseShuffle = not tofConfig.depthParams.avgPhaseShuffle + print("ToF toggling avgPhaseShuffle value to:", tofConfig.depthParams.avgPhaseShuffle) + tofCfgQueue.send(tofConfig) elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() From 147eec19ff396b92e592646b64e7801b2d8af439 Mon Sep 17 00:00:00 
2001 From: Florin Buica Date: Tue, 25 Apr 2023 15:24:58 +0300 Subject: [PATCH 47/66] add amplitude separately -- cherry-picked, with few more ToF related changes included --- utilities/cam_test.py | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 4c2644881..7d6d126e5 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -81,10 +81,19 @@ def socket_type_pair(arg): help="Make OpenCV windows resizable. Note: may introduce some artifacts") parser.add_argument('-tun', '--camera-tuning', type=Path, help="Path to custom camera tuning database") -parser.add_argument('-d', '--device', default="", type=str, - help="Optional MX ID of the device to connect to.") parser.add_argument('-raw', '--enable-raw', default=False, action="store_true", help='Enable the RAW camera streams') +parser.add_argument('-tofraw', '--tof-raw', action='store_true', + help="Show just ToF raw output instead of post-processed depth") +parser.add_argument('-tofamp', '--tof-amplitude', action='store_true', + help="Show also ToF amplitude output alongside depth") +parser.add_argument('-tofcm', '--tof-cm', action='store_true', + help="Show ToF depth output in centimeters, capped to 255") +parser.add_argument('-rgbprev', '--rgb-preview', action='store_true', + help="Show RGB `preview` stream instead of full size `isp`") + +parser.add_argument('-d', '--device', default="", type=str, + help="Optional MX ID of the device to connect to.") parser.add_argument('-ctimeout', '--connection-timeout', default=30000, help="Connection timeout in ms. 
Default: %(default)s (sets DEPTHAI_CONNECTION_TIMEOUT environment variable)") @@ -188,6 +197,7 @@ def get(self): tof = {} xout = {} xout_raw = {} +xout_tof_amp = {} streams = [] tofConfig = {} for c in cam_list: @@ -209,6 +219,12 @@ def get(self): tofConfig.depthParams.avgPhaseShuffle = False tofConfig.depthParams.minimumAmplitude = 3.0 tof[c].initialConfig.set(tofConfig) + if args.tof_amplitude: + amp_name = 'tof_amplitude_' + c + xout_tof_amp[c] = pipeline.create(dai.node.XLinkOut) + xout_tof_amp[c].setStreamName(amp_name) + streams.append(amp_name) + tof[c].amplitude.link(xout_tof_amp[c].input) elif cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) @@ -240,7 +256,7 @@ def get(self): xout_raw[c] = pipeline.create(dai.node.XLinkOut) xout_raw[c].setStreamName(raw_name) streams.append(raw_name) - tof[c].amplitude.link(xout_raw[c].input) + cam[c].raw.link(xout_raw[c].input) cam[c].setRawOutputPacked(False) if args.camera_tuning: @@ -271,6 +287,8 @@ def exit_cleanly(signum, frame): cam_name[p.socket.name] = p.sensorName if args.enable_raw: cam_name['raw_'+p.socket.name] = p.sensorName + if args.tof_amplitude: + cam_name['tof_amplitude_'+p.socket.name] = p.sensorName print('USB speed:', device.getUsbSpeed().name) @@ -352,7 +370,7 @@ def exit_cleanly(signum, frame): fps_capt[c].update(pkt.getTimestamp().total_seconds()) width, height = pkt.getWidth(), pkt.getHeight() frame = pkt.getCvFrame() - if cam_type_tof[c.split('_')[-1]] and not c.startswith('raw_'): + if cam_type_tof[c.split('_')[-1]] and not (c.startswith('raw_') or c.startswith('tof_amplitude_')): if args.tof_cm: # pixels represent `cm`, capped to 255. 
Value can be checked hovering the mouse frame = (frame // 10).clip(0, 255).astype(np.uint8) @@ -379,7 +397,7 @@ def exit_cleanly(signum, frame): ) capture_list.remove(c) print() - if c.startswith('raw_'): + if c.startswith('raw_') or c.startswith('tof_amplitude_'): if capture: filename = capture_file_info + '_10bit.bw' print('Saving:', filename) From cc12e2ba25fc5870058894d2803e5f460bd66910 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Tue, 2 May 2023 15:48:42 +0300 Subject: [PATCH 48/66] ToFConfigBindings: fix docs/wheels build --- src/pipeline/datatype/ToFConfigBindings.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp index 34716a611..a51fae8ac 100644 --- a/src/pipeline/datatype/ToFConfigBindings.cpp +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -59,10 +59,10 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def(py::init>()) - .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams, 2)) - .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setDepthParams, 2)) - .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, node, ToFConfig, setAvgPhaseShuffle)) - .def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, node, ToFConfig, setMinAmplitude)) + .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams)) + .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setFreqModUsed)) + .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, ToFConfig, setAvgPhaseShuffle)) + .def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, ToFConfig, setMinAmplitude)) .def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) .def("get", &ToFConfig::get, DOC(dai, ToFConfig, get)) From 
25bed1f66d5ac7538d79f35e810ad8bb39833b22 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 8 May 2023 10:49:07 +0300 Subject: [PATCH 49/66] Some cleanup, some fixes for cam_test.py -- includes few missing things from previous cherry-picks... --- CMakeLists.txt | 1 - utilities/cam_test.py | 41 +++++++++++++++++++++++++---------------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 35c956c01..a20e65da9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -124,7 +124,6 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/node/IMUBindings.cpp src/pipeline/node/EdgeDetectorBindings.cpp src/pipeline/node/FeatureTrackerBindings.cpp - src/pipeline/node/ToFBindings.cpp src/pipeline/node/AprilTagBindings.cpp src/pipeline/node/DetectionParserBindings.cpp src/pipeline/node/WarpBindings.cpp diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 7d6d126e5..240007de3 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -45,7 +45,6 @@ from itertools import cycle from pathlib import Path import sys -#import cam_test_gui import signal @@ -53,17 +52,18 @@ def socket_type_pair(arg): socket, type = arg.split(',') if not (socket in ['rgb', 'left', 'right', 'cama', 'camb', 'camc', 'camd']): raise ValueError("") - if not (type in ['m', 'mono', 'c', 'color']): + if not (type in ['m', 'mono', 'c', 'color', 't', 'tof']): raise ValueError("") is_color = True if type in ['c', 'color'] else False - return [socket, is_color] + is_tof = True if type in ['t', 'tof'] else False + return [socket, is_color, is_tof] parser = argparse.ArgumentParser() parser.add_argument('-cams', '--cameras', type=socket_type_pair, nargs='+', - default=[['rgb', True], ['left', False], - ['right', False], ['camd', True]], - help="Which camera sockets to enable, and type: c[olor] / m[ono]. 
" + default=[['rgb', True, False], ['left', False, False], + ['right', False, False], ['camd', True, False]], + help="Which camera sockets to enable, and type: c[olor] / m[ono] / t[of]. " "E.g: -cams rgb,m right,c . Default: rgb,c left,m right,m camd,c") parser.add_argument('-mres', '--mono-resolution', type=int, default=800, choices={480, 400, 720, 800}, help="Select mono camera resolution (height). Default: %(default)s") @@ -109,15 +109,18 @@ def socket_type_pair(arg): import depthai as dai if len(sys.argv) == 1: + import cam_test_gui cam_test_gui.main() cam_list = [] cam_type_color = {} +cam_type_tof = {} print("Enabled cameras:") -for socket, is_color in args.cameras: +for socket, is_color, is_tof in args.cameras: cam_list.append(socket) cam_type_color[socket] = is_color - print(socket.rjust(7), ':', 'color' if is_color else 'mono') + cam_type_tof[socket] = is_tof + print(socket.rjust(7), ':', 'tof' if is_tof else 'color' if is_color else 'mono') print("DepthAI version:", dai.__version__) print("DepthAI path:", dai.__file__) @@ -230,7 +233,10 @@ def get(self): cam[c].setResolution(color_res_opts[args.color_resolution]) cam[c].setIspScale(1, args.isp_downscale) # cam[c].initialControl.setManualFocus(85) # TODO - cam[c].isp.link(xout[c].input) + if args.rgb_preview: + cam[c].preview.link(xout[c].input) + else: + cam[c].isp.link(xout[c].input) else: cam[c] = pipeline.createMonoCamera() cam[c].setResolution(mono_res_opts[args.mono_resolution]) @@ -352,7 +358,9 @@ def exit_cleanly(signum, frame): chroma_denoise = 0 control = 'none' show = False - tof_amp_min = tofConfig.depthParams.minimumAmplitude + + jet_custom = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) + jet_custom[0] = [0, 0, 0] print("Cam:", *[' ' + c.ljust(8) for c in cam_list], "[host | capture timestamp]") @@ -370,7 +378,8 @@ def exit_cleanly(signum, frame): fps_capt[c].update(pkt.getTimestamp().total_seconds()) width, height = pkt.getWidth(), pkt.getHeight() frame = 
pkt.getCvFrame() - if cam_type_tof[c.split('_')[-1]] and not (c.startswith('raw_') or c.startswith('tof_amplitude_')): + cam_skt = c.split('_')[-1] + if cam_type_tof[cam_skt] and not (c.startswith('raw_') or c.startswith('tof_amplitude_')): if args.tof_cm: # pixels represent `cm`, capped to 255. Value can be checked hovering the mouse frame = (frame // 10).clip(0, 255).astype(np.uint8) @@ -387,7 +396,7 @@ def exit_cleanly(signum, frame): print(txt) capture = c in capture_list if capture: - capture_file_info = ('capture_' + c + '_' + cam_name[c] + capture_file_info = ('capture_' + c + '_' + cam_name[cam_socket_opts[cam_skt].name] + '_' + str(width) + 'x' + str(height) + '_exp_' + str(int(pkt.getExposureTime().total_seconds()*1e6)) + '_iso_' + str(pkt.getSensitivity()) @@ -409,7 +418,7 @@ def exit_cleanly(signum, frame): if type == dai.ImgFrame.Type.RAW12: multiplier = (1 << (16-4)) frame = frame * multiplier # Debayer as color for preview/png - if cam_type_color[c.split('_')[-1]]: + if cam_type_color[cam_skt]: # See this for the ordering, at the end of page: # https://docs.opencv.org/4.5.1/de/d25/imgproc_color_conversions.html # TODO add bayer order to ImgFrame getType() @@ -441,12 +450,12 @@ def exit_cleanly(signum, frame): elif key == ord('c'): capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') - elif key == ord('g'): + elif key == ord('g') and tof: f_mod = dai.RawToFConfig.DepthParams.TypeFMod.MAX if tofConfig.depthParams.freqModUsed == dai.RawToFConfig.DepthParams.TypeFMod.MIN else dai.RawToFConfig.DepthParams.TypeFMod.MIN print("ToF toggling f_mod value to:", f_mod) tofConfig.depthParams.freqModUsed = f_mod tofCfgQueue.send(tofConfig) - elif key == ord('h'): + elif key == ord('h') and tof: tofConfig.depthParams.avgPhaseShuffle = not tofConfig.depthParams.avgPhaseShuffle print("ToF toggling avgPhaseShuffle value to:", tofConfig.depthParams.avgPhaseShuffle) tofCfgQueue.send(tofConfig) @@ -597,7 +606,7 @@ def exit_cleanly(signum, frame): 
chroma_denoise = clamp(chroma_denoise + change, 0, 4) print("Chroma denoise:", chroma_denoise) ctrl.setChromaDenoise(chroma_denoise) - elif control == 'tof_amplitude_min': + elif control == 'tof_amplitude_min' and tof: amp_min = clamp(tofConfig.depthParams.minimumAmplitude + change, 0, 50) print("Setting min amplitude(confidence) to:", amp_min) tofConfig.depthParams.minimumAmplitude = amp_min From d9c7f15c127c513e1f6eff4d5c71b6733bae9ed1 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 17 Mar 2023 05:37:05 +0200 Subject: [PATCH 50/66] ImgFrame: handle RAW10/12/14 (unpacked) like RAW16 --- src/pipeline/datatype/ImgFrameBindings.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/pipeline/datatype/ImgFrameBindings.cpp b/src/pipeline/datatype/ImgFrameBindings.cpp index b39b1e721..a89ff4230 100644 --- a/src/pipeline/datatype/ImgFrameBindings.cpp +++ b/src/pipeline/datatype/ImgFrameBindings.cpp @@ -202,6 +202,9 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ break; case ImgFrame::Type::RAW16: + case ImgFrame::Type::RAW14: + case ImgFrame::Type::RAW12: + case ImgFrame::Type::RAW10: shape = {img.getHeight(), img.getWidth()}; dtype = py::dtype::of(); break; @@ -303,6 +306,9 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ case ImgFrame::Type::RAW8: case ImgFrame::Type::RAW16: + case ImgFrame::Type::RAW14: + case ImgFrame::Type::RAW12: + case ImgFrame::Type::RAW10: case ImgFrame::Type::GRAY8: case ImgFrame::Type::GRAYF16: default: From 561dcd174a82428684b9e12564c37e97b4a26935 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Sun, 14 May 2023 10:31:51 +0200 Subject: [PATCH 51/66] Added some python bindings for Node Inputs (getParent and possibleDatatypes) --- src/pipeline/node/NodeBindings.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index 56c089e80..d1a758fe8 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ 
-211,6 +211,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("group", &Node::Input::group, DOC(dai, Node, Input, group)) .def_readwrite("name", &Node::Input::name, DOC(dai, Node, Input, name)) .def_readwrite("type", &Node::Input::type, DOC(dai, Node, Input, type)) + .def_readwrite("possibleDatatypes", &Node::Input::possibleDatatypes, DOC(dai, Node, Input, possibleDatatypes)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) .def("setBlocking", &Node::Input::setBlocking, py::arg("blocking"), DOC(dai, Node, Input, setBlocking)) .def("getBlocking", &Node::Input::getBlocking, DOC(dai, Node, Input, getBlocking)) .def("setQueueSize", &Node::Input::setQueueSize, py::arg("size"), DOC(dai, Node, Input, setQueueSize)) From 0f20d8d4a08e8422307f3611f91ba709ce5e8f24 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 15 May 2023 18:47:54 +0300 Subject: [PATCH 52/66] core/FW: UVC H.264, fix Device to forward pipeline DeviceConfig --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 7bf36cdef..4a452a9df 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 7bf36cdef7bee74c89b50515d0149d9468191c12 +Subproject commit 4a452a9df02d2991fdac29b8984313d0d89d76e8 From 5af35ba3ed4f972b86b85b4523f031e156b9a2ae Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 15 May 2023 19:01:17 +0300 Subject: [PATCH 53/66] rgb_uvc.py: Windows fixes: change SIGKILL->SIGTERM, set env var before depthai import --- examples/ColorCamera/rgb_uvc.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/examples/ColorCamera/rgb_uvc.py b/examples/ColorCamera/rgb_uvc.py index 42e510c95..78a27129b 100755 --- 
a/examples/ColorCamera/rgb_uvc.py +++ b/examples/ColorCamera/rgb_uvc.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import depthai as dai import time import argparse @@ -10,6 +9,14 @@ parser.add_argument('-l', '--load-and-exit', default=False, action="store_true") args = parser.parse_args() +if args.load_and_exit: + import os + # Disabling device watchdog, so it doesn't need the host to ping periodically. + # Note: this is done before importing `depthai` + os.environ["DEPTHAI_WATCHDOG"] = "0" + +import depthai as dai + def getPipeline(): enable_4k = True # Will downscale 4K -> 1080p @@ -68,17 +75,14 @@ def flash(pipeline=None): quit() if args.load_and_exit: - import os - # Disabling device watchdog, so it doesn't need the host to ping periodically - os.environ["DEPTHAI_WATCHDOG"] = "0" device = dai.Device(getPipeline()) - print("\nDevice started, open a UVC viewer to check the camera stream.") - print("Attempting to force-quit this process...") - print("To reconnect with depthai, a device power-cycle may be required") - # We do not want the device to be closed, so kill the process. + print("\nDevice started. Attempting to force-terminate this process...") + print("Open an UVC viewer to check the camera stream.") + print("To reconnect with depthai, a device power-cycle may be required in some cases") + # We do not want the device to be closed, so terminate the process uncleanly. 
# (TODO add depthai API to be able to cleanly exit without closing device) import signal - os.kill(os.getpid(),signal.SIGKILL) + os.kill(os.getpid(), signal.SIGTERM) # Standard UVC load with depthai with dai.Device(getPipeline()) as device: From a2f73eb7b8751f76d786ae17f26f5c0f246f2efa Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 15 May 2023 20:02:04 +0200 Subject: [PATCH 54/66] Modified XLink to a temporary branch --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 4bb0f8345..cc8edba15 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 4bb0f8345de4f3a1c045e62f19a7c9139accc7dc +Subproject commit cc8edba15e6d69b8ddb1b6b8e303d8219244759c From 1ac989693e6c198cce1d64c65025d1feea743acd Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 16 May 2023 20:18:41 +0200 Subject: [PATCH 55/66] Updated XLink with fixed winusb mxid retrieval --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index cc8edba15..1defb6790 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit cc8edba15e6d69b8ddb1b6b8e303d8219244759c +Subproject commit 1defb67907dcb191335d98897754e0c67ef04075 From 78720c26bea88b1bdda95305d40973b68cb66949 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Fri, 19 May 2023 00:05:55 +0300 Subject: [PATCH 56/66] FW: Stereo: handle queue blocking settings --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index a33f90b3f..75b93d617 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit a33f90b3fb627df08e30a4148002c74f804a50f2 +Subproject commit 75b93d617bf97b3fcd57504438592c294ab64478 From 72cddbb885eb6495ce74ffbe80e1492c612a5452 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Wed, 24 May 2023 18:43:01 +0200 Subject: [PATCH 57/66] [FW] Updated for some devices and ToF --- depthai-core | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 75b93d617..2bd6419ba 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 75b93d617bf97b3fcd57504438592c294ab64478 +Subproject commit 2bd6419babb3a3c46f3ed67acd227b74fd43cb85 From ba178299d80bb6f60aa20c836ca8d683b577ca0f Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 29 May 2023 22:49:14 +0200 Subject: [PATCH 58/66] [FW] WIP for S2 PoE boards --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 2bd6419ba..14dd92bff 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 2bd6419babb3a3c46f3ed67acd227b74fd43cb85 +Subproject commit 14dd92bff859952e15d918bee7a2f784d7fb040f From 3f093046749ab51ccae41eb805932923a01fc452 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 30 May 2023 18:04:28 +0200 Subject: [PATCH 59/66] [FW] WIP for new SoMs --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 14dd92bff..be9c9d9fa 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 14dd92bff859952e15d918bee7a2f784d7fb040f +Subproject commit be9c9d9fa379fdf6f433a8bf758a194529a8b510 From 42af7451ddd3f75a1721ba10dcc21bfa0f3216b0 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 30 May 2023 19:08:47 +0200 Subject: [PATCH 60/66] Fixed build --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index be9c9d9fa..7c07d7c93 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit be9c9d9fa379fdf6f433a8bf758a194529a8b510 +Subproject commit 7c07d7c933a0e5993e23a5460aa5e0c9f5bb409b From a6e7193636aa7731e4a71b438b457410bf997374 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 2 Jun 2023 19:19:24 +0200 Subject: [PATCH 61/66] [FW] Fixed camera orientation --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core 
b/depthai-core index 2bd6419ba..ceb954c16 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 2bd6419babb3a3c46f3ed67acd227b74fd43cb85 +Subproject commit ceb954c16f0d8214873242c0adeb9c3819390a96 From fc53ec248b91b165d45c24bfd236fd351a19891d Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Wed, 7 Jun 2023 19:28:04 +0200 Subject: [PATCH 62/66] Bump version to v2.22.0.0 --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 5f75ae74a..ae0379482 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 5f75ae74ade6ae12bd56c71c0149c0482f23c18f +Subproject commit ae03794826aeb34f00481238ae30c65f6ec8915c From 53feb9395f3e7eea60fbadcfd4b26bf61824770c Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 8 Jun 2023 12:10:35 +0200 Subject: [PATCH 63/66] Added filter by device name --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index ae0379482..830fa4cf6 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit ae03794826aeb34f00481238ae30c65f6ec8915c +Subproject commit 830fa4cf696b2fc7ab96bc305e14428a88bb5e6a From 552abdedc38dac0aa4712ed30da43f1eeac67825 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 8 Jun 2023 12:27:20 +0200 Subject: [PATCH 64/66] Merge pull request #836 from JanLipovsek/get_child_run_id added child run id to summary --- .github/workflows/main.yml | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 030265f29..469f0b65d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -540,14 +540,33 @@ jobs: event-type: depthai-python-release client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' + # notify_hil_workflow_linux_x86_64: + # needs: [build-linux-x86_64] + # runs-on: ubuntu-latest + # steps: + # - name: Repository Dispatch + # 
uses: peter-evans/repository-dispatch@v2 + # with: + # token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} + # repository: luxonis/depthai-core-hil-tests + # event-type: python-hil-event + # client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' + notify_hil_workflow_linux_x86_64: needs: [build-linux-x86_64] runs-on: ubuntu-latest steps: - - name: Repository Dispatch - uses: peter-evans/repository-dispatch@v2 - with: - token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} - repository: luxonis/depthai-core-hil-tests - event-type: python-hil-event - client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' \ No newline at end of file + - name: Dispatch an action and get the run ID + uses: codex-/return-dispatch@v1 + id: return_dispatch + with: + token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT + ref: main # or refs/heads/target_branch + repo: depthai-core-hil-tests + owner: luxonis + workflow: regression_test.yml + workflow_inputs: '{"commit": "${{ github.ref }}", "sha": "${{ github.sha }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' + workflow_timeout_seconds: 120 # Default: 300 + + - name: Release + run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY \ No newline at end of file From ec095966037dda2fa521b84e5f10fe8a28d0e4fd Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 12 Jun 2023 18:02:06 +0300 Subject: [PATCH 65/66] FW: update IMX296 tuning --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 830fa4cf6..0435b503c 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 830fa4cf696b2fc7ab96bc305e14428a88bb5e6a +Subproject commit 0435b503c65c7bdd280d2b641747118c0f72c865 From 21463ddc0e482a102180e52993466336a63c81d9 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Tue, 13 
Jun 2023 02:04:40 +0200 Subject: [PATCH 66/66] [FW] Fixed OAK-D-SR and OV9782 issues --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 0435b503c..2facab6fb 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 0435b503c65c7bdd280d2b641747118c0f72c865 +Subproject commit 2facab6fb48c8c31245e56bd64589b5b58862d93