Skip to content

Commit

Permalink
Release 2.25.1 (#1005)
Browse files Browse the repository at this point in the history
* FW: fix fsync for OAK-D-SR

* Add tilt projection support

* Fixing externally managed environment (#1001)

- Added custom error message in addition to "Externally managed environment" to let users know how to solve it

- Updated the workflow to always create a separate virtual environment into which depthai will be installed, avoiding the above error

* Install NN blobs from blobconverter (#999)

* added thermal_nn to install_requirements
* Adding thermal to artifactory

Co-authored-by: zrezke <[email protected]>

* Fixed two bugs / unexpected behaviours (#1002)

* Added bindings for the api.

* core

* FW: fixes for IMX378 and IMX582:
fix concurrent run,
fix scaling with IMX378 THE_1352X1012 resolution,
change Camera node best sensor config select to prioritize matching aspect ratio

* Remove the RH notification

* FW: fix default fsync GPIO state for OAK-FFC-4P R7,
FSIN_4LANE GPIO42 = input, pull-down.
Other depthai-core updates

* Develop sync with main (#1004)

* Docs/release 25 (#997)

* Initial docs update for v2.25

* Added pcl example docs

* Adding pointcloud control example

* Updating pointcloud example

* Added encoded_frame docs (#998)

* Added encoded_frame docs

* Docs update (#1000)

* Update sync_node.rst
* Adding USB 3.2Gen2 enable docs

---------

Co-authored-by: jakaskerl <[email protected]>

* Core v2.25.1

* Fixed FFC3P boot issue.

* Update core

* Bump core to tagged.

---------

Co-authored-by: alex-luxonis <[email protected]>
Co-authored-by: SzabolcsGergely <[email protected]>
Co-authored-by: jakaskerl <[email protected]>
Co-authored-by: Matevz Morato <[email protected]>
  • Loading branch information
5 people authored Apr 22, 2024
1 parent e738ead commit 4d85750
Show file tree
Hide file tree
Showing 7 changed files with 200 additions and 26 deletions.
12 changes: 0 additions & 12 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -531,18 +531,6 @@ jobs:
ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }}
ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }}

notify_robothub:
if: startsWith(github.ref, 'refs/tags/v')
needs: [release]
runs-on: ubuntu-latest
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.REPO_ACCESS_TOKEN }}
repository: luxonis/robothub-apps
event-type: depthai-python-release
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

# notify_hil_workflow_linux_x86_64:
# needs: [build-linux-x86_64]
Expand Down
10 changes: 7 additions & 3 deletions .github/workflows/test-install-dependencies.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@
if: endsWith(matrix.container_image, 'rolling') == true
run: |
sudo apt-get install -y python3-venv
python3 -m venv venv
. venv/bin/activate
python3 -m venv .env
. .env/bin/activate
pip install --upgrade pip
python3 examples/install_requirements.py
shell: bash
Expand All @@ -56,9 +56,13 @@
run: |
sed '/udevadm control --reload-rules && sudo udevadm trigger/d' docs/source/_static/install_dependencies.sh > tmp_script.sh
bash tmp_script.sh
- name: Install example requirements
- name: Create a virtual environment
run: |
python3 -m venv .env
. .env/bin/activate
pip install --upgrade pip
python3 examples/install_requirements.py
shell: bash
test_windows:
runs-on: windows-latest
steps:
Expand Down
82 changes: 82 additions & 0 deletions examples/NeuralNetwork/thermal_nnet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
import depthai as dai
import cv2
from pathlib import Path
import numpy as np
import sys


# Thermal-camera people detection demo: runs a YOLOv6n model trained on
# 256x192 thermal images and draws detections on a colormapped preview.
nnPath = str((Path(__file__).parent / Path('../models/yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]

if not Path(nnPath).exists():
    # Blob is fetched by install_requirements.py (via blobconverter).
    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

labels = ["person"]

device = dai.Device()

pipeline = dai.Pipeline()
nnet = pipeline.create(dai.node.YoloDetectionNetwork)
nnet.setBlobPath(nnPath)
nnet.setConfidenceThreshold(0.5)
nnet.setNumClasses(1)
nnet.setCoordinateSize(4)
nnet.setIouThreshold(0.4)

# CAM_E is the board socket the thermal sensor is attached to on this device.
thermalCam = pipeline.create(dai.node.Camera)
thermalCam.setBoardSocket(dai.CameraBoardSocket.CAM_E)
thermalCam.setPreviewSize(256, 192)

# Feed raw thermal frames to the NN; the preview stream is for display only.
thermalCam.raw.link(nnet.input)

rawOut = pipeline.createXLinkOut()
rawOut.setStreamName("preview")
thermalCam.preview.link(rawOut.input)

xoutNn = pipeline.createXLinkOut()
xoutNn.setStreamName("nn")
nnet.out.link(xoutNn.input)

xoutPass = pipeline.createXLinkOut()
xoutPass.setStreamName("pass")
nnet.passthrough.link(xoutPass.input)

device.startPipeline(pipeline)

qNn = device.getOutputQueue(name="nn", maxSize=2, blocking=False)
qPass = device.getOutputQueue(name="pass", maxSize=2, blocking=False)
qPreview = device.getOutputQueue(name="preview", maxSize=2, blocking=False)

cv2.namedWindow("nnet", cv2.WINDOW_NORMAL)
cv2.namedWindow("raw", cv2.WINDOW_NORMAL)
cv2.resizeWindow("nnet", 640, 480)
cv2.resizeWindow("raw", 640, 480)

while True:
    inNn = qNn.get()
    inPass = qPass.tryGet()
    inPreview = qPreview.get()
    if inNn and inPass:
        frame = inPass.getCvFrame().astype(np.float32)
        # Stretch the raw thermal range to 8-bit, then apply a color map for display.
        colormappedFrame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        colormappedFrame = cv2.applyColorMap(colormappedFrame, cv2.COLORMAP_MAGMA)

        detections = inNn.detections
        for detection in detections:
            # Detections are normalized [0,1]; clamp and scale to the 256x192 frame.
            xmin = max(0.0, detection.xmin)
            ymin = max(0.0, detection.ymin)
            xmax = min(1.0, detection.xmax)
            ymax = min(1.0, detection.ymax)
            pt1 = int(xmin * 256), int(ymin * 192)
            pt2 = int(xmax * 256), int(ymax * 192)
            cv2.rectangle(colormappedFrame, pt1, pt2, (0, 255, 0))
            cv2.putText(colormappedFrame, labels[detection.label], pt1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow("nnet", colormappedFrame)
    if inPreview:
        cv2.imshow("raw", inPreview.getCvFrame())

    if cv2.waitKey(1) == ord("q"):
        break
86 changes: 76 additions & 10 deletions examples/install_requirements.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@
import argparse
import re
import platform
from subprocess import CalledProcessError, DEVNULL
import textwrap


convert_default = "empty"
parser = argparse.ArgumentParser()
Expand Down Expand Up @@ -46,7 +49,7 @@ def hasWhitespace(string):
# try to import opencv, numpy in a subprocess, since it might fail with illegal instruction
# if it was previously installed w/ pip without setting OPENBLAS_CORE_TYPE=ARMV8 env variable
try:
subprocess.check_call([sys.executable, "-c", "import numpy, cv2;"])
subprocess.check_call([sys.executable, "-c", "import numpy, cv2;"], stderr=DEVNULL)
requireOpenCv = False
except subprocess.CalledProcessError as ex:
requireOpenCv = True
Expand Down Expand Up @@ -94,10 +97,38 @@ def hasWhitespace(string):

# Update pip
pip_update_cmd = [*pip_install, "pip"]

if args.dry_run:
prettyPrint(pip_update_cmd)
else:
subprocess.check_call(pip_update_cmd)
try:
subprocess.check_call(pip_update_cmd)
except CalledProcessError as e:
print(f"\n\n\033[31m\033[1m[Warning]\033[0m An error occurred while trying to update pip: {e}\n")
print("This likely stems from the fact that you're not using a Python virtual environment.")
venv_creation_instructions = textwrap.dedent(f"""\
\033[94m\033[1m
Here's how you can create and activate a virtual environment:
1. Create a virtual environment:
- For Linux or MacOS, use: python3 -m venv {parent_dir}/.env
- For Windows, use: python -m venv {parent_dir}/.env
2. Activate the virtual environment:
- For Linux or MacOS, use: source {parent_dir}/.env/bin/activate
- For Windows, use: {parent_dir}/.env/Scripts/activate
Once activated, you'll be working within the virtual environment. You can then attempt to re-run this script.
To exit the virtual environment when you're done, simply use the command: deactivate
For more detailed instructions, please refer to the official Python documentation on virtual environments: https://docs.python.org/3/tutorial/venv.html
\033[0m
""")

print(textwrap.indent(venv_creation_instructions, ' '))
exit(0)


# Install python dependencies
python_dependencies_cmd = [*pip_package_install, *DEPENDENCIES]
if args.dry_run:
Expand Down Expand Up @@ -161,16 +192,51 @@ def hasWhitespace(string):
subprocess.check_call(downloader_cmd)

if args.convert != convert_default:
nn_models_shaves = {
"mobilenet-ssd": [5, 6, 8],
"person-detection-retail-0013": [7],
"yolo-v4-tiny-tf": [6],
"yolo-v3-tiny-tf": [6],

nn_model_configs = {
"mobilenet-ssd": {
"shaves": [5, 6, 8],
"compile_params": ["-ip U8"],
"zoo_type": "intel",
"default_ov_version": "2021.4"
},
"person-detection-retail-0013": {
"shaves": [7],
"compile_params": ["-ip U8"],
"zoo_type": "intel",
"default_ov_version": "2021.4"
},
"yolo-v4-tiny-tf": {
"shaves": [6],
"compile_params": ["-ip U8"],
"zoo_type": "intel",
"default_ov_version": "2021.4"
},
"yolo-v3-tiny-tf": {
"shaves": [6],
"compile_params": ["-ip U8"],
"zoo_type": "intel",
"default_ov_version": "2021.4"
},
"yolov6n_thermal_people_256x192": {
"shaves": [6],
"compile_params": ["-ip FP16"],
"zoo_type": "depthai",
"default_ov_version": "2022.1"
},
}

blobconverter_cmds = [
[sys.executable, "-m", "blobconverter", "-zn", nn_name, "-sh", str(nn_shave), "-o", f"{examples_dir}/models", *(["-v", args.convert] if args.convert is not None else [])]
for nn_name in nn_models_shaves
for nn_shave in nn_models_shaves[nn_name]
[sys.executable,
"-m", "blobconverter",
"-zn", nn_name,
"-sh", str(nn_shave),
"-o", f"{examples_dir}/models",
"-zt", nn_model_configs[nn_name]["zoo_type"],
*(["--compile-params", " ".join(nn_model_configs[nn_name]["compile_params"])] if nn_model_configs[nn_name]["compile_params"] else []),
*(["-v", args.convert] if args.convert != convert_default else ["-v", nn_model_configs[nn_name]["default_ov_version"]])]
for nn_name in nn_model_configs
for nn_shave in nn_model_configs[nn_name]["shaves"]
]
install_blobconverter_cmd = [*pip_package_install, "blobconverter"]
for cmd in [install_blobconverter_cmd] + blobconverter_cmds:
Expand Down
33 changes: 33 additions & 0 deletions examples/models/thermal-yolo/model.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# MIT License

# Copyright (c) 2021 Luxonis Holding Corporation

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Model manifest for the YOLOv6n thermal people detector used by
# examples/NeuralNetwork/thermal_nnet.py; the blob is downloaded from the
# Luxonis artifactory and verified against the size/sha256 below.
description: >-
thermal-yolo
# NOTE(review): task_type "object_attributes" looks unusual for a YOLO
# detection model — confirm against the model-zoo manifest schema.
task_type: object_attributes
files:
- name: yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob
size: 9311960
sha256: fb75828e7014ad92170fe54bb3a3253b8be076005bf651ac30eb0841f63a3b86
source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob

# dldt = OpenVINO (formerly Deep Learning Deployment Toolkit) blob format.
framework: dldt
license: https://raw.githubusercontent.com/luxonis/depthai-model-zoo/main/LICENSE
1 change: 1 addition & 0 deletions src/DeviceBootloaderBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,7 @@ void DeviceBootloaderBindings::bind(pybind11::module& m, void* pCallstack){

//.def("flashCustom", &DeviceBootloader::flashCustom, py::arg("memory"), py::arg("offset"), py::arg("progressCallback"), py::arg("data"), DOC(dai, DeviceBootloader, flashCustom))
.def("getVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.getVersion(); }, DOC(dai, DeviceBootloader, getVersion))
.def("getFlashedVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.getFlashedVersion(); }, DOC(dai, DeviceBootloader, getFlashedVersion))

.def("isEmbeddedVersion", &DeviceBootloader::isEmbeddedVersion, DOC(dai, DeviceBootloader, isEmbeddedVersion))
.def("getType", &DeviceBootloader::getType, DOC(dai, DeviceBootloader, getType))
Expand Down

0 comments on commit 4d85750

Please sign in to comment.