Merge pull request #1023 from luxonis/release_2.26.0.0
Release 2.26.0.0
moratom authored May 25, 2024
2 parents e0726e1 + 307a650 commit ad4ddea
Showing 25 changed files with 1,297 additions and 129 deletions.
69 changes: 7 additions & 62 deletions .github/workflows/main.yml
@@ -72,7 +72,9 @@ jobs:
steps:
- name: Print home directory
run: echo Home directory inside container $HOME

- name: Setup cmake
if: matrix.os == 'macos-latest'
uses: jwlawson/actions-setup-cmake@v1.14
- name: Cache .hunter folder
if: matrix.os != 'windows-latest'
uses: actions/cache@v3
@@ -230,13 +232,13 @@ jobs:
ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }}

# This job builds wheels for macOS x86_64 arch
build-macos-x86_64:
build-macos:
needs: build-docstrings
runs-on: macos-latest
strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9, '3.10', '3.11', '3.12']
fail-fast: false
python-version: [3.8, 3.9, '3.10', '3.11', '3.12']
os: [macos-13, macos-14] # macos-13 is x64, macos-14 is arm64
steps:
- name: Cache .hunter folder
uses: actions/cache@v3
@@ -290,63 +292,6 @@ jobs:
ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }}
ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }}

# This job builds wheels for macOS arm64 arch
build-macos-arm64:
needs: build-docstrings
runs-on: [self-hosted, macOS, ARM64]
steps:
# Cached locally on runner
# - name: Cache .hunter folder
# uses: actions/cache@v3
# with:
# path: ~/.hunter
# key: hunter-macos-latest
- name: List .hunter cache directory
run: |
ls -a -l ~/.hunter/_Base/ || true
echo "PATH=$PATH"
- uses: actions/checkout@v3
with:
submodules: 'recursive'

- uses: actions/download-artifact@v3
with:
name: 'docstrings'
path: docstrings
- name: Specify docstring to use while building the wheel
run: echo "DEPTHAI_PYTHON_DOCSTRINGS_INPUT=$PWD/docstrings/depthai_python_docstring.hpp" >> $GITHUB_ENV

- name: Append build hash if not a tagged commit
if: startsWith(github.ref, 'refs/tags/v') != true
run: echo "BUILD_COMMIT_HASH=${{github.sha}}" >> $GITHUB_ENV

# - name: Build and install depthai-core
# run: |
# echo "MACOSX_DEPLOYMENT_TARGET=11.0" >> $GITHUB_ENV
# cmake -S depthai-core/ -B build_core -D CMAKE_BUILD_TYPE=Release -D CMAKE_TOOLCHAIN_FILE=$PWD/cmake/toolchain/pic.cmake
# cmake --build build_core --target install --parallel 4
# echo "DEPTHAI_INSTALLATION_DIR=$PWD/build_core/install/" >> $GITHUB_ENV

- name: Build wheels
run: for PYBIN in {9..12}; do "python3.${PYBIN}" -m pip wheel . -w wheelhouse/ --verbose; done

- name: Auditing wheels
run: delocate-wheel -v -w wheelhouse/audited wheelhouse/*.whl

- name: Archive wheel artifacts
uses: actions/upload-artifact@v3
with:
name: audited-wheels
path: wheelhouse/audited/
- name: Deploy wheels to artifactory (if not a release)
if: startsWith(github.ref, 'refs/tags/v') != true
run: bash ./ci/upload-artifactory.sh
env:
ARTIFACTORY_URL: ${{ secrets.ARTIFACTORY_URL }}
ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }}
ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }}

# This job builds wheels for x86_64 arch
build-linux-x86_64:
needs: build-docstrings
@@ -470,7 +415,7 @@ jobs:

release:
if: startsWith(github.ref, 'refs/tags/v')
needs: [pytest, build-linux-armhf, build-windows-x86_64, build-macos-x86_64, build-macos-arm64, build-linux-x86_64, build-linux-arm64]
needs: [pytest, build-linux-armhf, build-windows-x86_64, build-macos, build-linux-x86_64, build-linux-arm64]
runs-on: ubuntu-latest

steps:
4 changes: 3 additions & 1 deletion CMakeLists.txt
@@ -132,7 +132,8 @@ pybind11_add_module(${TARGET_NAME}
src/pipeline/node/PointCloudBindings.cpp
src/pipeline/node/SyncBindings.cpp
src/pipeline/node/MessageDemuxBindings.cpp

src/pipeline/node/CastBindings.cpp
src/pipeline/node/ImageAlignBindings.cpp
src/pipeline/datatype/ADatatypeBindings.cpp
src/pipeline/datatype/AprilTagConfigBindings.cpp
src/pipeline/datatype/AprilTagsBindings.cpp
@@ -157,6 +158,7 @@ pybind11_add_module(${TARGET_NAME}
src/pipeline/datatype/TrackletsBindings.cpp
src/pipeline/datatype/PointCloudConfigBindings.cpp
src/pipeline/datatype/PointCloudDataBindings.cpp
src/pipeline/datatype/ImageAlignConfigBindings.cpp
)

if(WIN32)
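The two new binding files register the Cast and ImageAlign nodes, and the matching datatype entry exposes ImageAlignConfig on the Python side. A quick smoke check after rebuilding, as a sketch; the attribute names are inferred from the binding file names above, so treat them as assumptions:

import depthai as dai

# New node types exposed by CastBindings.cpp and ImageAlignBindings.cpp
assert hasattr(dai.node, "Cast")
assert hasattr(dai.node, "ImageAlign")

# New datatype exposed by ImageAlignConfigBindings.cpp
cfg = dai.ImageAlignConfig()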
2 changes: 1 addition & 1 deletion depthai-core
Submodule depthai-core updated 37 files
+3 −1 .github/workflows/main.workflow.yml
+4 −1 CMakeLists.txt
+2 −2 cmake/Depthai/DepthaiBootloaderConfig.cmake
+1 −1 cmake/Depthai/DepthaiDeviceSideConfig.cmake
+36 −0 examples/CMakeLists.txt
+7 −6 examples/Camera/thermal_cam.cpp
+50 −0 examples/Cast/blur.cpp
+63 −0 examples/Cast/concat.cpp
+66 −0 examples/Cast/diff.cpp
+1 −1 examples/FeatureTracker/feature_tracker.cpp
+148 −0 examples/ImageAlign/depth_align.cpp
+135 −0 examples/ImageAlign/image_align.cpp
+164 −0 examples/ImageAlign/thermal_align.cpp
+135 −0 examples/ImageAlign/tof_align.cpp
+169 −0 examples/SpatialDetection/spatial_tiny_yolo_tof.cpp
+140 −0 examples/ToF/tof_depth.cpp
+1 −1 include/depthai/device/CalibrationHandler.hpp
+10 −0 include/depthai/device/DeviceBase.hpp
+33 −0 include/depthai/pipeline/datatype/ImageAlignConfig.hpp
+0 −7 include/depthai/pipeline/datatype/ToFConfig.hpp
+62 −0 include/depthai/pipeline/node/Cast.hpp
+91 −0 include/depthai/pipeline/node/ImageAlign.hpp
+26 −2 include/depthai/pipeline/node/ToF.hpp
+5 −5 include/depthai/pipeline/node/VideoEncoder.hpp
+2 −0 include/depthai/pipeline/nodes.hpp
+1 −1 shared/depthai-shared
+4 −0 src/device/DeviceBase.cpp
+13 −0 src/device/DeviceBootloader.cpp
+21 −0 src/pipeline/datatype/ImageAlignConfig.cpp
+2 −0 src/pipeline/datatype/ImgFrame.cpp
+46 −17 src/pipeline/datatype/StreamMessageParser.cpp
+1 −21 src/pipeline/datatype/ToFConfig.cpp
+34 −0 src/pipeline/node/Cast.cpp
+46 −0 src/pipeline/node/ImageAlign.cpp
+11 −1 src/pipeline/node/ToF.cpp
+0 −1 tests/src/image_manip_node_test.cpp
+24 −5 tests/src/stream_message_parser_test.cpp
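The submodule bump brings the C++ ImageAlign examples listed above, but no Python counterpart lands in this commit. A minimal sketch of how the new node would be wired from Python, with the input/output names taken from include/depthai/pipeline/node/ImageAlign.hpp and therefore assumptions rather than verified API:

import depthai as dai

p = dai.Pipeline()

camRgb = p.create(dai.node.ColorCamera)
left = p.create(dai.node.MonoCamera)
right = p.create(dai.node.MonoCamera)
stereo = p.create(dai.node.StereoDepth)
align = p.create(dai.node.ImageAlign)
xout = p.create(dai.node.XLinkOut)
xout.setStreamName("depth_aligned")

left.setCamera("left")
right.setCamera("right")
left.out.link(stereo.left)
right.out.link(stereo.right)

# Re-project the depth map into the color camera's viewpoint
stereo.depth.link(align.input)
camRgb.isp.link(align.inputAlignTo)
align.outputAligned.link(xout.input)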
1 change: 1 addition & 0 deletions examples/Camera/thermal_cam.py
@@ -16,6 +16,7 @@ def onMouse(event, x, y, *args):

# Thermal camera
thermalCam = pipeline.create(dai.node.Camera)
thermalCam.setFps(25) # Limit to 25 FPS, the most the sensor supports; the rate is capped even if left at default, but then a warning is printed.
width, height = -1, -1
thermalFound = False
for features in device.getConnectedCameraFeatures():
49 changes: 49 additions & 0 deletions examples/Cast/blur.py
@@ -0,0 +1,49 @@
import depthai as dai
import cv2
from pathlib import Path

SHAPE = 300

p = dai.Pipeline()

camRgb = p.create(dai.node.ColorCamera)
nn = p.create(dai.node.NeuralNetwork)
rgbOut = p.create(dai.node.XLinkOut)
cast = p.create(dai.node.Cast)
castXout = p.create(dai.node.XLinkOut)

camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)

nnBlobPath = (Path(__file__).parent / Path('../models/blur_simplified_openvino_2021.4_6shave.blob')).resolve().absolute()

nn.setBlobPath(nnBlobPath)

rgbOut.setStreamName("rgb")

castXout.setStreamName("cast")

cast.setOutputFrameType(dai.RawImgFrame.Type.BGR888p)

# Linking
camRgb.preview.link(nn.input)
camRgb.preview.link(rgbOut.input)
nn.out.link(cast.input)
cast.output.link(castXout.input)

with dai.Device(p) as device:
    qCam = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False)

    while True:
        inCast = qCast.get()
        assert isinstance(inCast, dai.ImgFrame)
        inRgb = qCam.get()
        assert isinstance(inRgb, dai.ImgFrame)
        cv2.imshow("Blur", inCast.getCvFrame())
        cv2.imshow("Original", inRgb.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
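For reference, what the on-device Cast node does in this example can be approximated on the host. A sketch, assuming the blur model emits a planar FP16 BGR tensor with values already in the 0-255 range:

import numpy as np

def cast_to_bgr888p(nn_data, shape=300):
    # Flat FP16 output tensor -> planar (3, H, W) -> interleaved HWC uint8 for OpenCV
    tensor = np.array(nn_data.getFirstLayerFp16(), dtype=np.float32)
    planar = tensor.reshape(3, shape, shape)
    return np.clip(planar.transpose(1, 2, 0), 0, 255).astype(np.uint8)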
59 changes: 59 additions & 0 deletions examples/Cast/concat.py
@@ -0,0 +1,59 @@
import numpy as np
import cv2
import depthai as dai
from pathlib import Path

SHAPE = 300

p = dai.Pipeline()

camRgb = p.create(dai.node.ColorCamera)
left = p.create(dai.node.MonoCamera)
right = p.create(dai.node.MonoCamera)
manipLeft = p.create(dai.node.ImageManip)
manipRight = p.create(dai.node.ImageManip)
nn = p.create(dai.node.NeuralNetwork)
cast = p.create(dai.node.Cast)
castXout = p.create(dai.node.XLinkOut)

camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)
camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)

left.setCamera("left")
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
manipLeft.initialConfig.setResize(SHAPE, SHAPE)
manipLeft.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)

right.setCamera("right")
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
manipRight.initialConfig.setResize(SHAPE, SHAPE)
manipRight.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p)

nnBlobPath = (Path(__file__).parent / Path('../models/concat_openvino_2021.4_6shave.blob')).resolve().absolute()
nn.setBlobPath(nnBlobPath)
nn.setNumInferenceThreads(2)

castXout.setStreamName("cast")
cast.setOutputFrameType(dai.ImgFrame.Type.BGR888p)

# Linking
left.out.link(manipLeft.inputImage)
right.out.link(manipRight.inputImage)
manipLeft.out.link(nn.inputs['img1'])
camRgb.preview.link(nn.inputs['img2'])
manipRight.out.link(nn.inputs['img3'])
nn.out.link(cast.input)
cast.output.link(castXout.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False)

    while True:
        inCast = qCast.get()
        assert isinstance(inCast, dai.ImgFrame)
        cv2.imshow("Concatenated frames", inCast.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
60 changes: 60 additions & 0 deletions examples/Cast/diff.py
@@ -0,0 +1,60 @@
import cv2
import depthai as dai
from pathlib import Path

SHAPE = 720

p = dai.Pipeline()

camRgb = p.create(dai.node.ColorCamera)
nn = p.create(dai.node.NeuralNetwork)
script = p.create(dai.node.Script)
rgbXout = p.create(dai.node.XLinkOut)
cast = p.create(dai.node.Cast)
castXout = p.create(dai.node.XLinkOut)

camRgb.setVideoSize(SHAPE, SHAPE)
camRgb.setPreviewSize(SHAPE, SHAPE)
camRgb.setInterleaved(False)

nnBlobPath = (Path(__file__).parent / Path('../models/diff_openvino_2022.1_6shave.blob')).resolve().absolute()
nn.setBlobPath(nnBlobPath)

script.setScript("""
old = node.io['in'].get()
while True:
    frame = node.io['in'].get()
    node.io['img1'].send(old)
    node.io['img2'].send(frame)
    old = frame
""")

rgbXout.setStreamName("rgb")
castXout.setStreamName("cast")
cast.setOutputFrameType(dai.RawImgFrame.Type.GRAY8)

# Linking
camRgb.preview.link(script.inputs['in'])
script.outputs['img1'].link(nn.inputs['img1'])
script.outputs['img2'].link(nn.inputs['img2'])
camRgb.video.link(rgbXout.input)
nn.out.link(cast.input)
cast.output.link(castXout.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(p) as device:
    qCam = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False)

    while True:
        colorFrame = qCam.get()
        assert isinstance(colorFrame, dai.ImgFrame)
        cv2.imshow("Color", colorFrame.getCvFrame())

        inCast = qCast.get()
        assert isinstance(inCast, dai.ImgFrame)
        cv2.imshow("Diff", inCast.getCvFrame())

        if cv2.waitKey(1) == ord('q'):
            break
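The Script node above feeds each frame to the network twice, once as the previous image and once as the current one, so the model always sees consecutive frame pairs. The diff model plus the GRAY8 cast roughly amounts to the following host-side computation; a sketch, assuming a simple absolute per-pixel difference:

import numpy as np

def frame_diff_gray8(prev_bgr, cur_bgr):
    # Absolute per-pixel difference, averaged over channels, clipped to uint8
    diff = np.abs(cur_bgr.astype(np.int16) - prev_bgr.astype(np.int16))
    return np.clip(diff.mean(axis=2), 0, 255).astype(np.uint8)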
