diff --git a/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/1_feature_request.yml b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/1_feature_request.yml new file mode 100644 index 000000000..7b161ecdb --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/1_feature_request.yml @@ -0,0 +1,43 @@ +name: Feature request 🧭 +description: Suggest an idea for this project. +labels: "feature_request" +body: + - type: markdown + attributes: + value: | + # Welcome 👋 + + Thanks for taking the time to fill out this feature request module. + Please fill out each section below. This info allows Stereolabs developers to correctly evaluate your request. + + Useful Links: + - Documentation: https://www.stereolabs.com/docs/ + - Stereolabs support: https://support.stereolabs.com/hc/en-us/ + - type: checkboxes + attributes: + label: Preliminary Checks + description: Please make sure that you verify each checkbox and follow the instructions for them. + options: + - label: "This issue is not a duplicate. Before opening a new issue, please search existing issues." + required: true + - label: "This issue is not a question, bug report, or anything other than a feature request directly related to this project." + required: true + - type: textarea + attributes: + label: Proposal + description: "What would you like to have as a new feature?" + placeholder: "A clear and concise description of what you want to happen." + validations: + required: true + - type: textarea + attributes: + label: Use-Case + description: "How would this help you?" + placeholder: "Tell us more what you'd like to achieve." + validations: + required: false + - type: textarea + id: anything-else + attributes: + label: Anything else? 
+ description: "Let us know if you have anything else to share" diff --git a/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/2_bug_report.yml b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/2_bug_report.yml new file mode 100644 index 000000000..30ac21e9e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/2_bug_report.yml @@ -0,0 +1,87 @@ +name: Bug Report 🐛 +description: Something isn't working as expected? Report your bugs here. +labels: "bug" +body: + - type: markdown + attributes: + value: | + # Welcome 👋 + + Thanks for taking the time to fill out this bug report. + Please fill out each section below. This info allows Stereolabs developers to diagnose (and fix!) your issue as quickly as possible. Otherwise we might need to close the issue without e.g. clear reproduction steps. + + Bug reports also shouldn't be used for generic questions, please use the [Stereolabs Community forum](https://community.stereolabs.com/) instead. + + Useful Links: + - Documentation: https://www.stereolabs.com/docs/ + - Stereolabs support: https://support.stereolabs.com/hc/en-us/ + - type: checkboxes + attributes: + label: Preliminary Checks + description: Please make sure that you verify each checkbox and follow the instructions for them. + options: + - label: "This issue is not a duplicate. Before opening a new issue, please search existing issues." + required: true + - label: "This issue is not a question, feature request, or anything other than a bug report directly related to this project." + required: true + - type: textarea + attributes: + label: Description + description: Describe the issue that you're seeing. + placeholder: Be as precise as you can. Feel free to share screenshots, videos, or data. The more information you provide the easier will be to provide you with a fast solution. + validations: + required: true + - type: textarea + attributes: + label: Steps to Reproduce + description: Clear steps describing how to reproduce the issue. + value: | + 1. 
+ 2. + 3. + ... + validations: + required: true + - type: textarea + attributes: + label: Expected Result + description: Describe what you expected to happen. + validations: + required: true + - type: textarea + attributes: + label: Actual Result + description: Describe what actually happened. + validations: + required: true + - type: dropdown + attributes: + label: ZED Camera model + description: What model of ZED camera are you using? + options: + - "ZED" + - "ZED Mini" + - "ZED2" + - "ZED2i" + validations: + required: true + - type: textarea + attributes: + label: Environment + render: shell + description: Useful information about your system. + placeholder: | + OS: Operating System + CPU: e.g. ARM + GPU: Nvidia Jetson Xavier NX + ZED SDK version: e.g. v3.5.3 + Other info: e.g. ROS Melodic + validations: + required: true + - type: textarea + attributes: + label: Anything else? + description: Please add any other information or comment that you think may be useful for solving the problem + placeholder: + validations: + required: false diff --git a/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/config.yml b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..bf0924358 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,12 @@ +blank_issues_enabled: false +contact_links: + - name: Online Documentation + url: https://www.stereolabs.com/docs/ + about: Check out the Stereolabs documentation for answers to common questions. + - name: Stereolabs Community + url: https://community.stereolabs.com/ + about: Ask questions, request features & discuss with other users and developers. + - name: Stereolabs Twitter + url: https://twitter.com/Stereolabs3D + about: The official Stereolabs Twitter account to ask questions, comment our products and share your projects with the ZED community. 
+ diff --git a/src/lib/zed-ros2-wrapper/.github/workflows/stale_issues.yml b/src/lib/zed-ros2-wrapper/.github/workflows/stale_issues.yml new file mode 100644 index 000000000..9e01bd591 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.github/workflows/stale_issues.yml @@ -0,0 +1,23 @@ +name: 'Stale issue handler' +on: + workflow_dispatch: + schedule: + - cron: '00 00 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@main + id: stale + with: + stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment otherwise it will be automatically closed in 5 days' + stale-pr-message: 'This PR is stale because it has been open 30 days with no activity. Remove stale label or comment otherwise it will be automatically closed in 5 days' + days-before-stale: 30 + days-before-close: 5 + operations-per-run: 1500 + exempt-issue-labels: 'feature_request' + exempt-pr-labels: 'feature_request' + enable-statistics: 'true' + close-issue-label: 'closed_for_stale' + close-pr-label: 'closed_for_stale' diff --git a/src/lib/zed-ros2-wrapper/.gitignore b/src/lib/zed-ros2-wrapper/.gitignore new file mode 100644 index 000000000..87df7b98a --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.gitignore @@ -0,0 +1,98 @@ +devel/ +logs/ +build/ +log/ +install/ +bin/ +lib/ +msg_gen/ +srv_gen/ +msg/*Action.msg +msg/*ActionFeedback.msg +msg/*ActionGoal.msg +msg/*ActionResult.msg +msg/*Feedback.msg +msg/*Goal.msg +msg/*Result.msg +msg/_*.py +build_isolated/ +devel_isolated/ + +# Generated by dynamic reconfigure +*.cfgc +/cfg/cpp/ +/cfg/*.py + +# Ignore generated docs +*.dox +*.wikidoc + +# eclipse stuff +.project +.cproject + +# qcreator stuff +CMakeLists.txt.user + +srv/_*.py +*.pcd +*.pyc +qtcreator-* +*.user + +/planning/cfg +/planning/docs +/planning/src + +*~ + +# Emacs +.#* + +# Catkin custom files +CATKIN_IGNORE + +# C++ objects and libs + +*.slo +*.lo +*.o +*.a +*.la +*.lai +*.so +*.dll +*.dylib + +# Qt-es + 
+/.qmake.cache +/.qmake.stash +*.pro.user +*.pro.user.* +*.qbs.user +*.qbs.user.* +*.moc +moc_*.cpp +moc_*.h +qrc_*.cpp +ui_*.h +Makefile* +*build-* + +# QtCreator + +*.autosave + +# QtCtreator Qml +*.qmlproject.user +*.qmlproject.user.* + +# QtCtreator CMake +CMakeLists.txt.user* + +# VSCode +.vscode + +# TMP folders +docker/tmp_sources diff --git a/src/lib/zed-ros2-wrapper/.gitrepo b/src/lib/zed-ros2-wrapper/.gitrepo new file mode 100644 index 000000000..a9407024c --- /dev/null +++ b/src/lib/zed-ros2-wrapper/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = git@github.com:stereolabs/zed-ros2-wrapper.git + branch = master + commit = b47c72779add6d58a4dba9c888b9ea529f6ae448 + parent = b16d3e70bda13036769caabb9e53982d75a89785 + method = merge + cmdver = 0.4.9 diff --git a/src/lib/zed-ros2-wrapper/CHANGELOG.rst b/src/lib/zed-ros2-wrapper/CHANGELOG.rst new file mode 100644 index 000000000..880bc52f5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/CHANGELOG.rst @@ -0,0 +1,603 @@ +LATEST CHANGES +============== + +v5.2.2 +---------- +- Default Positional Tracking mode changed back to `GEN_1` until the stability and reliability of `GEN_3` is improved. + Users can still select a specific mode by setting the `pos_tracking.pos_tracking_mode` parameter to `GEN_1`, `GEN_2`, or `GEN_3` according to their needs and preferences. +- Modified node behaviors when Positional Tracking is disabled [`pos_tracking.pos_tracking_enabled: false`]: + + - `publish_tf` is automatically disabled. + - The `odom` related topics are no longer advertised. + - The `pose` related topics are no longer advertised. + - The GNSS fusion is automatically disabled. + - The Plane Detection is automatically disabled. + - The Positional Tracking services are no longer advertised. 
+ - Depth stability follows the ZED SDK behaviors. + - Object Tracking follows the ZED SDK behaviors. + - Body Tracking follows the ZED SDK behaviors. + +- Add new parameter `debug.debug_dyn_params` to enable debug logs for dynamic parameters changes. + + - Dynamic parameters related logs are now displayed only if the new debug parameter `debug.debug_dyn_params` is set to `true` to avoid log spam when changing dynamic parameters. + +v5.2.1 +------ +- Added the parameter `general.grab_compute_capping_fps` to define a computation upper limit to the grab frequency. + + - This can be useful to get a known constant fixed rate or limit the computation load while keeping a short exposure time by setting a high camera capture framerate. + - If set to 0, the grab compute capping will be disabled, and the ZED SDK will process data at the grab rate. +- URDF now belongs to the `zed_description` package, which is now a dependency of the `zed_wrapper` package. This allows to use the URDF files of the ZED ROS2 Wrapper in other packages without depending on the whole wrapper. + + - The `zed_description` is available in binary form for ROS 2 Humble, Jazzy, and Rolling and can be installed with `sudo apt install ros-$ROS_DISTRO-zed-description` + +v5.2.0 +------ +- Removed the `zed_wrapper/urdf/include/materials.urdf.xacro` file and moved the material settings directly in the `zed_macro.urdf.xacro` file to avoid possible conflicts in multi-camera configurations. Thx @davesarmoury for the fix +- Added the `enable_localization_only` parameter to the configuration to allow the camera to localize in the loaded area memory without updating the map with new information. +- Added support for the ZED SDK Positional Tracking 2D mode if the SDK version is 5.1 or higher. +- Added the `zed_debug` package for debugging ZED Components by loading them in a single C++ process. 
+- Add `enable_depth` service to disable depth processing at runtime +- Positional Tracking `GEN_3` is now the default mode when using ZED SDK v5.2 or newer, providing improved stability and performance. The `GEN_2` mode is still available as an option for users who prefer it or need it for specific use cases. +- When using GEN_3 with ZED SDK v5.2 or newer, Positional Tracking continues to provide localization feedback even if depth is disabled at runtime or when the node starts by setting the `depth.depth_mode` parameter to `NONE`. +- New diagnostic information regarding Positional Tracking status: "Mode", "Odometry Status", "Spatial Memory Status", "Tracking Fusion Status". + +v5.1.0 +------ +- Changed ZED Camera image topic names to match the cleaner convention used by ZED X One cameras: + + - **NOTE** THIS IS A BREAKING CHANGE. + - Left sensor topics: + + - From `~/left/image_rect_color` to `~/left/color/rect/image` + - From `~/left_raw/image_raw_color` to `~/left/color/raw/image` + - From `~/left_gray/image_rect_gray` to `~/left/gray/rect/image` + - From `~/left_raw_gray/image_raw_gray` to `~/left/gray/raw/image` + + - Right sensor topics: + + - From `~/right/image_rect_color` to `~/right/color/rect/image` + - From `~/right_raw/image_raw_color` to `~/right/color/raw/image` + - From `~/right_gray/image_rect_gray` to `~/right/gray/rect/image` + - From `~/right_raw_gray/image_raw_gray` to `~/right/gray/raw/image` + + - RGB sensor topics (corresponding to the left sensor for the Stereo cameras): + + - From `~/rgb/image_rect_color` to `~/rgb/color/rect/image` + - From `~/rgb_raw/image_raw_color` to `~/rgb/color/raw/image` + - From `~/rgb_gray/image_rect_gray` to `~/rgb/gray/rect/image` + - From `~/rgb_raw_gray/image_raw_gray` to `~/rgb/gray/raw/image` + +- Added parameters to select what topics will be advertised when the node starts: + + - **NOTE** THIS IS A BREAKING CHANGE. TOPICS MAY NO LONGER BE AVAILABLE IF NOT ENABLED IN THE DEFAULT CONFIGURATION. 
Please check what topic you use and set the relevant parameter to `true`. + - `general.publish_status`: Advertise the status topics that are published only if a node subscribes to them + - `video.publish_rgb`: Advertise the RGB image topics that are published only if a node subscribes to them + - `video.publish_left_right`: Advertise the left and right image topics that are published only if a node subscribes to them + - `video.publish_raw`: Advertise the raw image topics that are published only if a node subscribes to them + - `video.publish_gray`: Advertise the gray image topics that are published only if a node subscribes to them + - `video.publish_stereo`: Advertise the stereo image topic that is published only if a node subscribes to it + - `sensors.publish_imu`: Advertise the IMU topic that is published only if a node subscribes to it + - `sensors.publish_imu_raw`: Advertise the raw IMU topic that is published only if a node subscribes to it + - `sensors.publish_cam_imu_transf`: Advertise the IMU transformation topic that is published only if a node subscribes to it + - `sensors.publish_mag`: Advertise the magnetometer topic that is published only if a node subscribes to it + - `sensors.publish_baro`: Advertise the barometer topic that is published only if a node subscribes to it + - `sensors.publish_temp`: Advertise the temperature topics that are published only if a node subscribes to them + - `region_of_interest.publish_roi_mask`: Advertise the ROI mask image topic that is published only if a node subscribes to it + - `depth.publish_depth_map`: Advertise the depth map topics that are published only if a node subscribes to them + - `depth.publish_depth_info`: Advertise the depth info topic that is published only if a node subscribes to it + - `depth.publish_point_cloud`: Advertise the point cloud topic that is published only if a node subscribes to it + - `depth.publish_depth_confidence`: Advertise the depth confidence topic that is published only if a node 
subscribes to it + - `depth.publish_disparity`: Advertise the disparity topic that is published only if a node subscribes to it + - `pos_tracking.publish_odom_pose`: Advertise the odometry and pose topics that are published only if a node subscribes to them + - `pos_tracking.publish_pose_cov`: Advertise the pose with covariance topic that is published only if a node subscribes to it + - `pos_tracking.publish_cam_path`: Advertise the camera odometry and pose path topics that are published only if a node subscribes to them + - `mapping.publish_det_plane`: Advertise the plane detection topics that is published only if a node subscribes to it + +- Added topic enabler feature to `sl::CameraOne` + + - **NOTE** THIS IS A BREAKING CHANGE. TOPICS MAY NO LONGER BE AVAILABLE IF NOT ENABLED IN THE DEFAULT CONFIGURATION. Please check what topic you use and set the relevant parameter to `true`. + - Added parameter `video.publish_rgb` to enable/disable RGB image publishing + - Added parameter `video.publish_raw` to enable/disable raw image publishing + - Added parameter `video.publish_gray` to enable/disable gray image publishing + - Added parameter `sensors.publish_imu`: Advertise the IMU topic that is published only if a node subscribes to it + - Added parameter `sensors.publish_imu_raw`: Advertise the raw IMU topic that is published only if a node subscribes to it + - Added parameter `sensors.publish_temp`: Advertise the temperature topics that are published only if a node subscribes to them + +- Enabled Isaac ROS NITROS integration for ZED X One cameras +- Added debug parameter `debug.debug_nitros` to enable debug logs for NITROS-related operations. +- Added debug parameter `debug.use_pub_timestamps` to use the current ROS time for the message timestamp instead of the camera timestamp. + This is useful to test data communication latency. +- Added `camera_info` in transport namespace to reflect `rviz2` requirements with the Camera plugin. 
+ + - Added new `camInfoPubTrans` publisher for each image topic to publish the `camera_info` in the transport namespace. + - Updated `publishImageWithInfo` method to handle the new `camInfoPubTrans` publisher. + +- Added 3D visualization of the positional tracking landmarks as a point cloud on topic `~/pose/landmarks` (only with GEN_2 and GEN_3 positional tracking modes): + + - Added parameter `pos_tracking.publish_3d_landmarks` to enable/disable landmarks publishing + - Added parameter `pos_tracking.publish_lm_skip_frame` to set the frequency of landmarks publishing (0 to publish every frame) + +- Removed Point Cloud Transport as a required dependency. Point Cloud Transport is now only automatically enabled if the `point_cloud_transport` package is installed on the system. +- Removed FFMPEG Image Transport support because of a problem with the Humble distribution not allowing to set the transport parameters, and the lack of compatibility with NVIDIA® Jetson. +- Improved diagnostic information for 3D Mapping status in diagnostics +- Fixed random crash when stopping 3D Mapping +- Fixed a bug that forced the maximum publishing rate of the point cloud topic to 15 Hz with SVO files +- Set the default mode for positional tracking to `GEN_2` waiting for improvements in `GEN_3` stability +- Remapped `robot_description` topic to `_description` to allow multi-camera URDF integration +- Changed minimum depth value to 0.01 meters when using ZED SDK v5.1 or higher +- Added debug option for TF broadcasting + + - Improved TF debug logs to show frame transformations when enabled + +- Static baseline information from URDF is now overwritten by the real baseline value retrieved from the camera calibration file. +- Removed mandatory `custom_baseline` launch argument for virtual stereo cameras made with two ZED X One cameras. + The value is retrieved from the calibration file. +- IMU TF is now broadcast as static if IPC is disabled. 
+- IMU Transform topic is now published with TRANSIENT LOCAL durability if IPC is disabled. +- Fixed `camera_info` publishing when no image topics are subscribed +- Loop Closure log event is now displayed only in DEBUG mode to reduce log spam +- Renamed camera optical frames to comply with ROS conventions: + + - **NOTE** THIS IS A BREAKING CHANGE. Please update your TF references accordingly. + - From `_left_camera_optical_frame` to `_left_camera_frame_optical` + - From `_right_camera_optical_frame` to `_right_camera_frame_optical` + - From `_camera_optical_frame` to `_rgb_camera_frame_optical` + +- Added twist information to the `odom` topic +- Added support for the new Virtual Stereo API with SDK v5.1. + + - New launch arguments to setup the virtual camera: `serial_numbers` and `camera_ids` + - New `ZedCamera` component parameters to setup the virtual camera: `general.virtual_serial_numbers` and `general.virtual_camera_ids` + - **NOTE** ZED MEDIA SERVER IS NO LONGER REQUIRED to create a virtual Stereo camera using two ZED X One cameras. + +- Added 24-bit BGR image mode + + - Added parameter `video.enable_24bit_output` to enable/disable 24-bit BGR image publishing to `common_stereo.yaml` and `common_mono.yaml` configuration files + - **NOTE**: `video.enable_24bit_output` is disabled by default to maintain backward compatibility. Enabling this parameter will change the image message encoding from `BGRA8` to `BGR8`, which may affect existing applications that rely on the previous encoding. 
+ +- Enabled SVO support for ZED X One cameras (playback, recording, and diagnostic) +- Set thread names according to the thread function name for easier identification in debuggers and profilers +- Enable SVO for ZedCameraOne + + - add service to pause SVO playback + - add service to set SVO frame ID + - add services to start/stop SVO recording + - publish SVO status + - publish SVO clock on `/clock` topic + +- Publish ZedCameraOne heartbeat status on `~/status/heartbeat` topic + +v5.0.0 +------ +- Backward compatible with SDK v4.2 +- Added official support for ROS 2 Jazzy Jalisco +- Note: requires the latest `zed_msgs` package v5.0.0 +- Added SVO Status topic to monitor the current SVO status of type `zed_msgs::SvoStatus` +- Added fully integrated Health Status topic of type `zed_msgs::HealthStatusStamped` + + - Remove the single health status topics to simplify health monitoring + +- Remove `cob_srvs` dependency to use the custom `zed_msgs::SetSvoFrame` service +- Added Heartbeat status message at 1 Hz: `~/status/heartbeat` +- Improve performance with the default stereo configuration +- Fix Positional Tracking enabling when required by ZED SDK modules +- Fix realtime IMU data publishing when using SVO2 +- Added parameter 'debug.sdk_verbose_log_file' to Stereo and Mono components to set the path of the SDK verbose log file +- Clean shutdown of ZED components using `pre_shutdown_callback` +- Added new parameter `svo.replay_rate` to set the replay rate for the SVO when not used in realtime mode (range [0.10-5.0]) +- Improved diagnostic information for SVO playback +- Default SVO Recording Compression mode [`0`] is forced to `H265` replacing the old `LOSSLESS` mode + + - H265 is far superior as it uses hardware encoder, resulting in faster, lighter encoding, and dramatically smaller SVO2 files + +- Added `/clock` publisher to be used in SVO Mode to synchronize other nodes with the SVO timestamp +- Added parameter `svo.publish_svo_clock` to enable the `/clock` 
publisher + + - The parameter 'svo.publish_svo_clock' is normally overridden by the `publish_svo_clock` launch option + +- Moved `brightness`, `contrast`, and `hue` from `common_stereo.yaml` to `zed.yaml`, `zed2.yaml`, `zed2i.yaml`, and `zedm.yaml` files +- Add advanced handling of the Object Detection and Tracking module of the ZED SDK + + - Move the multi-box native object detection parameters to the `object_detection.yaml` file + - Add specific parameters to set the confidence threshold for each of the includes object detection classes of the ZED SDK + - Move the Custom Object Detection parameters to the `custom_object_detection.yaml` file + - Support all the new parameters of the ZED SDK v5 separately for each of the custom object detection classes + +- The usage of the new Object Detection support is fully described on the ZED ROS 2 online documentation: + + - Object Detection: https://docs.stereolabs.com/ros2/object-detection/ + - Custom Object Detection: https://docs.stereolabs.com/ros2/custom-object-detection/ + +- Separated Video/Depth data publishing into its own thread for more precise control over the publishing rate, + independent of the camera grab rate. This enables recording SVO files or processing positional tracking at + full grab rate, while publishing data at a reduced rate to optimize bandwidth usage. +- Added a new launch option 'node_log_type' to set the type of log to be used by the ZED Node. + + - The available options are `screen`, `log`, and `both`. + +- Changed `pos_tracking.area_memory_db_path` to `pos_tracking.area_file_path` to match the ZED SDK parameter name +- Added parameter `pos_tracking.save_area_memory_on_closing` to save the Area Memory before closing the camera +- Fixed Area Mapping file handling according to the ZED SDK policies. 
+ + - The Area Memory file is now saved only if the Area Memory is enabled, if the `pos_tracking.save_area_memory_on_closing` + parameter is set to `true`, if the `pos_tracking.area_file_path` is set and if the `pos_tracking.area_file_path` is valid. + +- Added `save_area_memory` service + + - Set the filename as a parameter. If the filename is empty, it uses the value of the parameter `pos_tracking.area_file_path` if not empty. + +- Added `enable_ipc` launch argument to enable intra-process communication (IPC) when using ROS 2 Composition. + + - Note: NITROS requires IPC to be disabled to work properly. + +- Fixed plane topic names, adding missing node name prefix +- Added camera_info to Confidence Map topic +- Enabled Isaac ROS integration and automatic NITROS usage: https://docs.stereolabs.com/isaac-ros/ + + - Added the parameter `debug.disable_nitros` to disable NITROS usage. This is useful for debugging and testing purposes. + +v4.2.5 +------ +- Add new parameter 'depth.point_cloud_res' to set a specific point cloud publishing resolution + + - 'COMPACT': Standard resolution. Optimizes processing and bandwidth + - 'REDUCED': Half 'COMPACT' resolution. Low processing and low bandwidth requirements + +- Add uptime and frame drop rate information to node diagnostics +- Add image validity check support [SDK 5 required] + + - Add new parameter 'general.enable_image_validity_check' + - Add new topic 'health_status/low_image_quality' to publish image quality status + - Add new topic 'health_status/low_lighting' to publish low light condition status + - Add new topic 'health_status/low_depth_reliability' to publish low depth quality status + - Add new topic 'health_status/low_motion_sensors_reliability' to publish low quality of inertial sensors status + - Set the Node Diagnostic to WARNING if any of the above conditions are detected + +- Add `general.camera_id` parameter to set the camera ID for the ZedCamera. 
+- Add `general.camera_id` parameter to set the camera ID for the ZedCameraOne. +- Add `camera_id` argument to the `zed_camera.launch.py` launch file. Useful for GMSL2 multi-camera configurations where camera ID is established by the GMSL2 wire. +- Improve Node Diagnostics information +- Add `pos_tracking.reset_pose_with_svo_loop` parameter to reset the camera pose to the `initial_base_pose` when the SVO loop is enabled and the SVO playback reaches the end of the file. +- Add `svo.play_from_frame` parameter to set the starting frame when playing an SVO file. +- Add `set_svo_frame` service to set the current frame when playing an SVO file. +- Remove unused open timeout for ZED X One cameras +- Add parameter `svo.use_svo_timestamps` to use the SVO timestamps when publishing data (both stereo and mono components) + +v4.2.x +------ +- Add new `OPTIMIZED` mode for `general.pub_resolution` +- Add new parameter `general.async_image_retrieval` to enable/disable the asynchronous image retrieval to be used with SVO recording. +- Set the Positional Tracking Mode to `GEN_1` as default as a workaround for the random crash issue caused by `GEN_2` mode. +- Fixed a bug for raw gray image publisher on Zed One Component: raw gray images were not published when the rectified image topic was subscribed. Thx @Alex-Beh +- Enabled grayscale output for ZED X One cameras (SDK v4.2.3 required) +- Enabled streaming input for ZED X One cameras (SDK v4.2.3 required) +- Fixed wrong range check for the `general.pub_downscale_factor` parameter +- Enhanced sensor thread rate due to an automatically adjusting sleep time +- Removed the `zed-ros2-interfaces` sub-module. The `zed_msgs` package is now included in ROS 2 Humble binaries and can be installed with `sudo apt install ros-humble-zed-msgs`. 
+- Fixed 4K resolution support for ZED X One 4K cameras +- Changed C++ version to 17 to follow ROS 2 Humble standard +- Renamed `common.yaml` to `common_stereo.yaml` +- Added `common_mono.yaml` for monocular cameras +- Added `video.enable_hdr` to `zedxone4k.yaml` for monocular 4K cameras +- Changed the name of the package `zed_interfaces` to `zed_msgs` to match the ROS 2 naming convention +- Added the new `stereolabs::ZedCameraOne` component to handle ZED X One cameras +- Removed the ZED Wrapper executable node. + + - Modified the launch file to create an isolated composable container that loads the `stereolabs:ZedCamera` or the `stereolabs:ZedCameraOne` component according to the camera model + +- Added support for custom ONNX detection engine (SDK v4.2 required) + + - Added value `CUSTOM_YOLOLIKE_BOX_OBJECTS` to the `object_detection.model` parameter + - Added parameter `object_detection.custom_onnx_file` to set the full path of the custom ONNX file + - Added parameter `object_detection.onnx_input_size` to set the size of the YOLO input tensor + - Added parameter `object_detection.custom_label_yaml` to set the full path to custom YAML file storing class labels in [COCO format](https://docs.ultralytics.com/datasets/detect/coco/#dataset-yaml) + +v4.1.x +------ +- Updated the Docker files to the CUDA 12.4 (PC), L4T 35.4 (Jetson), SDK v4.1.4 +- Added Local Streaming output + + - Added `enable_streaming` service to start/stop a streaming server + - Added Streaming Server diagnostic + - Added parameter 'stream_server.stream_enabled': enable the streaming server when the camera is open + - Added parameter 'stream_server.codec': different encoding types for image streaming + - Added parameter 'stream_server.port': Port used for streaming + - Added parameter 'stream_server.bitrate': Streaming bitrate (in Kbits/s) used for streaming + - Added parameter 'stream_server.gop_size': The GOP size determines the maximum distance between IDR/I-frames + - Added parameter 
'stream_server.adaptative_bitrate': Bitrate will be adjusted depending on the number of packets dropped during streaming + - Added parameter 'stream_server.chunk_size': Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long + - Added parameter 'stream_server.target_framerate': Framerate for the streaming output + +- Added Local Streaming input + + - Added 'stream.stream_address' and 'stream.stream_port' parameter to configure the local streaming input + +- GNSS Fusion temporarily disabled *(available with 4.1.1)* +- Moved parameter 'general.svo_file' to 'svo.svo_path' +- Moved parameter 'general.svo_loop' to 'svo.svo_loop' +- Moved parameter 'general.svo_realtime' to 'svo.svo_realtime' +- Removed obsolete launch files: 'zed.launch.pi','zed2.launch.pi', 'zed2i.launch.pi', 'zedm.launch.pi', 'zedx.launch.pi', 'zedxm.launch.pi' +- Removed obsolete display launch file: 'display_zed.launch.py', 'display_zed2.launch.py', 'display_zed2i.launch.py', 'display_zedm.launch.py', 'display_zedx.launch.py', 'display_zedxm.launch.py' +- Added support for custom virtual stereo cameras made with two calibrated ZED X One cameras *(available with 4.1.1)* +- Added parameter `pos_tracking.reset_odom_with_loop_closure` to automatically reset odometry when a loop closure is detected +- Added new positional tracking information to the `PosTrackStatus` message +- Added new `GnssFusionStatus` message with GNSS Fusion status information *(available with 4.1.1)* +- Added new parameters `gnss_fusion.h_covariance_mul` and `gnss_fusion.v_covariance_mul` to control the effects of the GNSS covariance +- Added support to Automatic ROI + + - Added ROI diagnostic + - Added parameter `debug.debug_roi` + - Publish ROI mask image on the topic `~/roi_mask` using image transport + - Moved the parameter `general.region_of_interest` to `region_of_interest.manual_polygon` + - Added automatic Region of Interest support + - Added parameter `region_of_interest.automatic_roi` 
+ - Added parameter `region_of_interest.depth_far_threshold_meters` + - Added parameter `region_of_interest.image_height_ratio_cutoff` + - Added parameter `region_of_interest.apply_to_depth` + - Added parameter `region_of_interest.apply_to_positional_tracking` + - Added parameter `region_of_interest.apply_to_object_detection` + - Added parameter `region_of_interest.apply_to_body_tracking` + - Added parameter `region_of_interest.apply_to_spatial_mapping` + +- Removed QoS parameters to use ROS 2 QoS overwrite -> https://design.ros2.org/articles/qos_configurability.html +- Added support for new `NEURAL_PLUS` depth mode +- Added new `_gnss_link` frame to URDF to set the position of the GNSS antenna with respect to the camera position +- New Docker configuration files allow to easily create "ZED ROS2 Wrapper" images based on specific tag versions. [Read more](./docker/README.md) +- Fixed a bug while playing a ZED X stream on a "not-Jetson" host device +- Add support for point cloud transport [only Humble, no Foxy] +- Add support for FFMPEG image transport +- Add new `ffmpeg.yaml` configuration file +- Fix `~/imu/data_raw` message not containing RAW IMU data + +v4.0.8 +------ +- The parameter `general.sdk_verbose` has been moved to `debug.sdk_verbose` and set to `0` as default. +- Added new parameter `general.optional_opencv_calibration_file` to use custom OpenCV camera calibrations. 
+- Added [new tutorial](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_robot_integration) to illustrate how to integrate one or more ZED cameras on a robot +- Added 'simulation.sim_enabled' parameter to enable the simulation mode +- Added 'simulation.sim_address' parameter to set the simulation server address +- Added 'simulation.sim_port' parameter to set the simulation server port +- Added `/clock` subscriber to check the presence of the required message when `use_sim_time` is true +- Force `grab_frame_rate` and `pub_frame_rate` to 60 Hz in simulation +- Force `grab_resolution` to `HD1080` in simulation +- Removed the `general.zed_id` parameter. Always use `general.serial_number` to distinguish between different cameras in a multi-camera configuration. +- The multi-camera example has been updated to match the new TF configuration +- The old launch files are now obsolete: 'ros2 launch zed_wrapper <camera_model>.launch.py' is replaced by 'ros2 + launch zed_wrapper zed_camera.launch.py camera_model:=<camera_model>' +- The reference link for positional tracking is no longer 'base_link' but `<camera_name>_camera_link`. + This will allow an easier ZED integration in existing robot configuration because the transform `base_link` -> `camera_link` + is no longer published by the ZED ROS2 Wrapper. 
Thanks to @SteveMacenski for the advice + + - Removed `parent` and `origin` parameters from `zed_macro.urdf.xacro` + - Removed launch argument `cam_pose` from `zed_camera.launch.py` + +- Moved parameter `publish_imu_tf` from `pos_tracking` to `sensors` to make it available also in "no depth" configurations of the node +- Added new parameter `pos_tracking.pos_tracking_mode` to exploit the new ZED SDK `QUALITY` mode for improved odometry and localization +- New Video/Depth processing throttling method by using the `grab_compute_capping_fps` ZED SDK parameter instead of a dedicated thread +- Advanced parameters to handle Thread scheduling policy and priorities (sudo required):`thread_sched_policy`,`thread_grab_priority`, + `thread_sensor_priority`,`thread_pointcloud_priority` +- Added new GNSS calibration parameters: `enable_reinitialization`, `enable_rolling_calibration`, `enable_translation_uncertainty_target`, `gnss_vio_reinit_threshold`, `target_translation_uncertainty`, `target_yaw_uncertainty` +- Added new Plane Detection parameters: `pd_max_distance_threshold`, `pd_normal_similarity_threshold` + +v4.0.5 +---------- +- The parameter `general.pub_resolution` can now take only `NATIVE` and `CUSTOM` values. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factor to reduce bandwidth in transmission +- Added new parameter `general.pub_downscale_factor` to be used with the new option `CUSTOM` for the parameter `general.pub_resolution` +- `ULTRA` is the new default value for `depth.depth_mode` (better performance for odometry and positional tracking) +- Added resolution `HD1080` for ZED X +- Fix issue with Body Tracking start/stop by service call. Now Body Tracking can be restarted multiple times +- Fix depth grab performance by removing a [not required `PNG Write` call](https://github.com/stereolabs/zed-ros2-wrapper/pull/164). 
Thank you Esteban Zamora @ezamoraa +- Fix bug with `general.pub_resolution` value, not allowing to select the correct data publish resolution +- Added new launch parameter `ros_params_override_path` to provide the path to a custom YAML file to override the parameters of the ZED Node without modifying the original files in the `zed_wrapper/config` folder. Thank you David Lu @MetroRobots + +v4.0.0 +------ +- Added support for ZED-X and ZED-X Mini + + - Moved `general.grab_resolution` and `general.grab_frame_rate` to the yaml file specific for the relative camera model (i.e. `zed.yaml`, `zedm.yaml`, `zed2.yaml`, `zed2i.yaml`, `zedx.yaml`, `zedxm.yaml`) + + - Added `zedx.launch.py` for ZED-X + - Added `zedxm.launch.py` for ZED-X Mini + - Improve `zed_macro.urdf.xacro` with specific configuration for the new camera models + - Added `display_zedx.launch.py` for ZED-X to ZED-ROS2-Examples + - Added `display_zedxm.launch.py` for ZED-X Mini to ZED-ROS2-Examples + - Added ZED-X and ZED-X Mini STL files to ZED-ROS2-Interfaces + +- Positional Tracking + + - Added `pos_tracking.set_as_static` parameters for applications with a static camera monitoring a robotics environment. 
See [PR #122](https://github.com/stereolabs/zed-ros2-wrapper/pull/122 ) Thx @gabor-kovacs + - Added custom message type `PosTrackStatus` + - Publish message on topic `~/pose/status` with the current status of the pose from the ZED SDK + - Publish message on topic `~/odom/status` with the current status of the odometry from the ZED SDK + +- Body Tracking + + - Added Support for the new Body Tracking module + - Added parameter `body_tracking.bt_enabled` to enable Body Tracking + - Added parameter `body_tracking.model` to set the AI model to be used + - Added parameter `body_tracking.body_format` to set the Body Format to be used + - Added parameter `body_tracking.allow_reduced_precision_inference` to improve performances + - Added parameter `body_tracking.max_range` to set the max range for Body Detection + - Added parameter `body_tracking.body_kp_selection` to choose the Body key points to be used + - Added parameter `body_tracking.enable_body_fitting` to enable body fitting + - Added parameter `body_tracking.enable_tracking` to enable the tracking of the detected bodies + - Added parameter `body_tracking.prediction_timeout_s` to set the timeout of the prediction phase while tracking + - Added parameter `body_tracking.confidence_threshold` to set the detection confidence threshold + - Added parameter `body_tracking.minimum_keypoints_threshold` to set the minimum number of detected key points to consider a body valid + - Publish new message on topic `~/body_trk/skeletons` + - Added service `enable_body_trk` to start/stop body tracking + +- GNSS fusion integration + + - New param `gnss_fusion.gnss_fusion_enabled` to enable GNSS fusion + - New param `gnss_fusion.gnss_fix_topic` name of the topic containing GNSS Fix data of type `sensor_msgs/NavSatFix` + - Added `nmea_msgs` dependency + - Added GNSS Fix Diagnostic + - Added new launch parameter `gnss_frame` to enable the GNSS link in the ZED URDF + - Added new node parameter `gnss_fusion.gnss_zero_altitude` to ignore 
GNSS altitude information + - Added new node parameter `gnss_fusion.gnss_frame` to set the name of the frame link of the GNSS sensor + - Disable Area Memory (loop closure) when GNSS fusion is enabled + - Added services `toLL` and `fromLL` to use the ZED ROS2 Wrapper with the Nav2 Waypoint Navigation package + - Added `geographic_msgs::msg::GeoPoseStamped` message publisher + - Added parameter `gnss_fusion.publish_utm_tf` + - Added parameter `gnss_fusion.broadcast_utm_transform_as_parent_frame` + - Added parameter `gnss_fusion.gnss_init_distance` + - Publish message on topic `~/geo_pose/status` with the current status of the GeoPose from the ZED SDK + - Publish message on topic `~/pose/filtered` with the current GNSS filtered pose in `map` frame + - Publish message on topic `~/pose/filtered/status` with the current status of the GNSS filtered pose from the ZED SDK + +- Object Detection + + - Added `object_detection.allow_reduced_precision_inference` to allow inference to run at a lower precision to improve runtime and memory usage + - Added `object_detection.max_range` to define an upper depth range for detections + - Removed `object_detection.body_format` + +- Docker + + - Added Docker files (see `docker` folder) ready to create Docker images for desktop host devices + +- Examples/Tutorials + + - Added multi-camera example in `zed-ros2-examples` repository. 
+ +- Added full Terrain Mapping (local obstacle detection) support [EXPERIMENTAL FEATURE AVAILABLE ONLY FOR BETA TESTERS] + + - ZED SDK Terrain Mapping published as GridMap message + - Added parameter `local_mapping.terrain_mapping_enabled` to enable terrain mapping publishing a local obstacle map + - Added parameter `local_mapping.data_pub_rate` to set the Local Map data publish frequency + - Added parameter `local_mapping.grid_resolution` to set the Local Map resolution in meters [min: 0.01 - max: 1.0] + - Added parameter `local_mapping.grid_range` to set the maximum depth range for local map generation [min: 1.0 - max: 8.0] + - Added parameter `local_mapping.height_threshold` to set the maximum height for obstacles + - Publish gridmap on topic `local_map/gridmap` + - Publish elevation map image on topic `local_map/elev_img` + - Publish obstacle color map image on topic `local_map/col_img` + - Added traversability cost computation for Terrain Mapping (local_mapping) + + - Change parameter `local_mapping.height_threshold` to `local_mapping.robot_heigth` + - Added parameter `local_mapping.robot_radius` to set radius of the robot + - Added parameter `local_mapping.robot_max_step` to set max height of a step that the robot can overcome + - Added parameter `local_mapping.robot_max_slope` to set max slope (degrees) that the robot can overcome + - Added parameter `local_mapping.robot_max_roughness` to set max roughness of the terrain that the robot can overcome + +- Added support for simulated data [EXPERIMENTAL FEATURE AVAILABLE ONLY FOR BETA TESTERS] + + - Added parameter `use_sim_time` to enable SIMULATION mode + - Added parameter `sim_address` to set the local address of the machine running the simulator + - Change StopWatch to use ROS clock instead of System Clock. 
In this way diagnostic and time checking work also in simulation + - Disable camera settings control in simulation + +- Others + + - Removed `sensing_mode`, no more available in SDK v4.0 + - Removed `extrinsic_in_camera_frame`, no more available in SDK v4.0 + - Added `zed_id` and `serial_number` launch parameters to open the correct camera in multi-camera configurations. + - Code lint and re-formatting according to [ROS2 code rules](https://docs.ros.org/en/humble/The-ROS2-Project/Contributing/Code-Style-Language-Versions.html). + - Added support for automatic lint tools to all the packages. + - Removed node parameter `general.resolution`, replaced by `general.grab_resolution`. + - Added node parameter `general.pub_resolution` used to reduce node computation and message bandwidth. + + - Available output resolutions: `HD2K`, `HD1080`, `HD720`, `MEDIUM`, `VGA`. `MEDIUM` is an optimized output resolution to maximize throughput and minimize processing costs. + + - Removed node parameters `video.img_downsample_factor` and `depth.depth_downsample_factor`. Use the new parameter `general.pub_resolution` instead. + - Change `general.grab_resolution` and `general.pub_resolution` from integer to string. + - Added new `LOW` value for `general.pub_resolution` (half the `MEDIUM` output resolution). + - Removed `depth.quality` parameter (replaced with `depth.depth_mode`) + - Added `depth.depth_mode` parameter: a string reflecting the ZED SDK `DEPTH_MODE` available value names + - The parameter `depth.depth_stabilization` is now an integer in [0,100] reflecting ZED SDK behavior + - Fix distortion model (see Issue [#128](https://github.com/stereolabs/zed-ros2-wrapper/issues/128)) + - Improve the code for Moving Average calculation for better node diagnostics. + - Temperature diagnostic is now always updated even if `sensors.sensors_image_sync` is true and no image topics are subscribed. + - Improve Grab thread and Video/Depth publishing thread elaboration time diagnostic. 
+ - Added a check on timestamp to not publish already published point cloud messages in the point cloud thread + - Improve thread synchronization when the frequency of the `grab` SDK function is lower than the expected camera frame rate setting because of a lack of processing power. + - Added diagnostic warning if the frequency of the camera grabbing thread is lower than the selected `general.grab_frame_rate` value. + - Removed annoying build log messages. Only warning regarding unsupported ROS 2 distributions will be displayed when required. + - Convert `shared_ptr` to `unique_ptr` for IPC support + - Improve the `zed_camera.launch.py` + + - Added support for `OpaqueFunction` in order to automatically configure the launch file according to the value of the launch parameter `cam_model`. + - Change parameters to set camera pose in launch files. From 6 separated parameters (`cam_pos_x`,`cam_pos_y`,`cam_pos_z`,`cam_roll`,`cam_pitch`,`cam_yaw`) to one single array (`cam_pose`). + - Removed the workaround for empty `svo_path` launch parameter values thanks to `TextSubstitution`. + - Modify the "display" launch files in [zed-ros2-examples](https://github.com/stereolabs/zed-ros2-examples) to match the new configuration. + - Added `publish_tf` and `publish_map_tf` launch parameters useful for multi-camera configuration or external odometry fusion. + + - Change LICENSE to Apache 2.0 to match ROS 2 license. + +v3.8.x +------ +- Fixed `set_pose` wrong behavior. Now initial odometry is coherent with the new starting point. +- Added Plane Detection. +- Fixed "NO DEPTH" mode. By setting `depth/quality` to `0` now the depth extraction and all the sub-modules depending on it are correctly disabled. +- Added `debug` sub-set of parameters with new parameters `debug_mode` and `debug_sensors`. +- Added support for ROS 2 Humble. Thx @nakai-omer. + The two ROS 2 LTS releases are now supported simultaneously. +- Set `read_only` flag in parameter descriptors for non-dynamic parameters. 
Thx @bjsowa. +- Enabled Intra Process Communication. The ZED node no longer publishes topics with `TRANSIENT LOCAL` durability. +- Improved TF broadcasting at grabbing frequency +- Improved IMU/Left Camera TF broadcasting at IMU frequency +- Fixed data grabbing frame rate when publishing is set to a lower value +- Added TF broadcasting diagnostic +- The parameter `general.sdk_verbose` is now an integer accepting different SDK verbose levels. +- Moved Object Detection parameters from cameras configuration files to `common.yaml` +- Moved Sensor Parameters from cameras configuration files to `common.yaml` +- New data thread configuration to maximize data publishing frequency + + - Sensor data publishing moved from timer to thread + - RGB/Depth data publishing moved from timer to thread + +- Fixed random errors when closing the node +- Fixed wrong timing when playing SVO in `real-time` mode +- Fixed units for atmospheric pressure data. Now pressure is published in `Pascals` according to the [definition of the topic](https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/FluidPressure.msg). +- Added new parameter `pos_tracking.transform_time_offset` to fix odometry TF timestamp issues +- Added new parameter `pos_tracking.depth_min_range` for removing fixed zones of the robot in the FoV of the camera from the visual odometry evaluation +- Added new parameter `pos_tracking.sensor_world` to define the world type that the SDK can use to initialize the Positional Tracking module +- Added new parameter `object_detection.prediction_timeout` for setting the timeout time [sec] of object prediction when not detected. +- Added support for ZED SDK Region of Interest: + + - Added parameter `general.region_of_interest` to set the region of interest for SDK processing. + - Added the service `resetRoi` to reset the region of interest. + - Added the service `setRoi` to set a new region of interest. 
+ +v3.7.x +---------- +- Added support for sport-related OD objects +- Added `remove_saturated_areas` dynamic parameter to disable depth filtering when luminance >=255 +- Added `sl::ObjectDetectionParameters::filtering_mode` parameter +- Publish `depth_info` topic with current min/max depth information +- Fix parameter override problem (Issue #71). Thx @kevinanschau +- Added default xacro path value in `zed_camera.launch.py`. Thx @sttobia +- Fix `zed-ros2-interfaces` sub-module url, changing from `ssh` to `https`. + +v3.6.x (2021-12-03) +------------------- +- Moved the `zed_interfaces` package to the `zed-ros2-interfaces` repository to match the same configuration of the ROS1 wrapper +- The `zed-ros2-interfaces` repository has been added as a sub-module to this repository +- Added new _base_link frame on the base of the camera to easily handle camera positioning on robots. Thx @civerachb-cpr +- Improve URDF by adding 3° slope for ZED and ZED2, X-offset for optical frames to correctly match the CMOS sensors position on the PCB, X-offset for mounting screw on ZED2i +- Added `zed_macro.urdf.xacro` to be included by other xacro file to easily integrate ZED cameras in the robot descriptions. See ROS1 PR [#771](https://github.com/stereolabs/zed-ros-wrapper/pull/771) for details. Thx @civerachb-cpr +- Fix URDF `height` value for ZED, ZED2 and ZED2i +- Fix performances drop on slower platforms. Thx @roncapat +- Fix SVO LOOP wrong behavior. 
Thx @kevinanschau +- Added xacro support for automatic URDF configuration +- Reworked launch files to support xacro and launch parameters + + - Use `ros2 launch zed_wrapper -s` to retrieve all the available parameters + +- Added `svo_path:=` as input for all the launch files to start the node using an SVO as input without modifying `common.yaml` +- Improved diagnostic information adding elaboration time on all the main tasks +- Improved diagnostic time and frequencies calculation +- Added StopWatch to sl_tools +- Enabled Diagnostic status publishing +- Changed the default values of the QoS parameter reliability for all topics from BEST_EFFORT to RELIABLE to guarantee compatibility with all ROS 2 tools +- Fixed tab error in `zedm.yaml` +- Fixed compatibility issue with ZED SDK older than v3.5 - Thanks @PhilippPolterauer +- Migration to ROS 2 Foxy Fitzroy + +v3.5.x (2021-07-05) +------------------- +- Added support for SDK v3.5 +- Added support for the new ZED 2i +- Added new parameter `pos_tracking/pos_tracking_enabled` to enable positional tracking from start even if not required by any subscribed topic. This is useful, for example, to keep the TF always updated. 
+- Added support for new AI models: `MULTI_CLASS_BOX_MEDIUM` and `HUMAN_BODY_MEDIUM` +- Depth advertising is disabled when depth is disabled (see `sl::DETH_MODE::NONE`) diff --git a/src/lib/zed-ros2-wrapper/CONTRIBUTING.md b/src/lib/zed-ros2-wrapper/CONTRIBUTING.md new file mode 100644 index 000000000..6ddd6877b --- /dev/null +++ b/src/lib/zed-ros2-wrapper/CONTRIBUTING.md @@ -0,0 +1,50 @@ + +## Submitting your code changes + +Code contributions should be made via pull requests to the appropriate repositories: +* [zed-ros2-wrapper](https://github.com/stereolabs/zed-ros2-wrapper/pulls) +* [zed-ros2-interfaces](https://github.com/stereolabs/zed-ros2-interfaces/pulls) +* [zed-ros2-examples](https://github.com/stereolabs/zed-ros2-examples/pulls) + +We ask all contributors to follow the practices explained in [ROS 2 documentation](https://docs.ros.org/en/humble/The-ROS2-Project/Contributing/Code-Style-Language-Versions.html). + +Before submitting a pull request please perform this list of tasks from the root of your ROS 2 workspace: +1. Automatic code formatting: + + `$ ament_uncrustify --reformat src` + +2. Build the packages to check for compile errors: + + `$ colcon build --symlink-install --cmake-args=-DCMAKE_BUILD_TYPE=Release` + +3. Perform the automatic build tests: + + `$ colcon test` + +4. Analyze and solve eventually reported errors: + + `$ colcon test-result --verbose` + +5. Repeat steps (1) -> (4) until all reported formatting errors have been resolved. + + +## License + +Any contribution that you make to this repository will +be under the Apache 2 License, as dictated by that +[license](http://www.apache.org/licenses/LICENSE-2.0.html): + +~~~ +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. 
+ Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. +~~~ + +Contributors must sign-off each commit by adding a `Signed-off-by: ...` +line to commit messages to certify that they have the right to submit +the code they are contributing to the project according to the +[Developer Certificate of Origin (DCO)](https://developercertificate.org/). \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/LICENSE b/src/lib/zed-ros2-wrapper/LICENSE new file mode 100644 index 000000000..ae79ff664 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/LICENSE @@ -0,0 +1,251 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +## Some of TensorFlow's code is derived from Caffe, which is subject to the following copyright notice: + +COPYRIGHT + +All contributions by the University of California: + +Copyright (c) 2014, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: + +Copyright (c) 2014, the respective contributors +All rights reserved. + +Caffe uses a shared copyright model: each contributor holds copyright over +their contributions to Caffe. The project versioning records all such +contribution and copyright details. If a contributor wants to further mark +their specific copyright on a particular contribution, they should indicate +their copyright solely in the commit message of the change when it is +committed. + +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +CONTRIBUTION AGREEMENT + +By contributing to the BVLC/caffe repository through pull-request, comment, +or otherwise, the contributor releases their content to the +license and copyright terms herein. diff --git a/src/lib/zed-ros2-wrapper/README.md b/src/lib/zed-ros2-wrapper/README.md new file mode 100644 index 000000000..e83d20553 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/README.md @@ -0,0 +1,330 @@ +

+ Stereolabs
+ ROS 2 wrapper +

+ +

+ ROS 2 packages for using Stereolabs ZED cameras.
+ ROS 2 Foxy Fitzroy (Ubuntu 20.04) - ROS 2 Humble Hawksbill (Ubuntu 22.04) - ROS 2 Jazzy Jalisco (Ubuntu 24.04) +

+ +
+ +This package enables the use of ZED cameras with ROS 2, providing access to a variety of data types, including: + +- Color and grayscale images, both rectified and unrectified +- Depth data +- Colored 3D point clouds +- Position and mapping, with optional GNSS data fusion +- Sensor data +- Detected objects +- Human skeleton data +- And more... + +[More information](https://www.stereolabs.com/docs/ros2) + +![Point_cloud](./images/PointCloud_Depth_ROS.jpg) + +## Installation + +### Prerequisites + +- [Ubuntu 20.04 (Focal Fossa)](https://releases.ubuntu.com/focal/), [Ubuntu 22.04 (Jammy Jellyfish)](https://releases.ubuntu.com/jammy/), or [Ubuntu 24.04 (Noble Numbat)](https://releases.ubuntu.com/noble/) +- [ZED SDK](https://www.stereolabs.com/developers/release/latest/) v5.2 (to support older versions please check the [releases](https://github.com/stereolabs/zed-ros2-wrapper/releases)) +- [CUDA](https://developer.nvidia.com/cuda-downloads) dependency +- ROS 2 Foxy Fitzroy (deprecated), ROS 2 Humble Hawksbill, or ROS 2 Jazzy Jalisco: + - [Foxy on Ubuntu 20.04](https://docs.ros.org/en/foxy/Installation/Linux-Install-Debians.html) [**Not recommended. EOL reached**] + - [Humble on Ubuntu 22.04](https://docs.ros.org/en/humble/Installation/Linux-Install-Debians.html) [EOL May 2027] + - [Jazzy Jalisco on Ubuntu 24.04](https://docs.ros.org/en/jazzy/Installation/Linux-Install-Debians.html) [EOL May 2029] + +### Build the package + +The **zed_ros2_wrapper** is a [colcon](http://design.ros2.org/articles/build_tool.html) package. + +> :pushpin: **Note:** If you haven’t set up your colcon workspace yet, please follow this short [tutorial](https://index.ros.org/doc/ros2/Tutorials/Colcon-Tutorial/). 
+ +To install the **zed_ros2_wrapper**, open a bash terminal, clone the package from GitHub, and build it: + +```bash +mkdir -p ~/ros2_ws/src/ # create your workspace if it does not exist +cd ~/ros2_ws/src/ #use your current ros2 workspace folder +git clone https://github.com/stereolabs/zed-ros2-wrapper.git +cd .. +sudo apt update +rosdep update +rosdep install --from-paths src --ignore-src -r -y # install dependencies +colcon build --symlink-install --cmake-args=-DCMAKE_BUILD_TYPE=Release --parallel-workers $(nproc) # build the workspace +echo source $(pwd)/install/local_setup.bash >> ~/.bashrc # automatically source the installation in every new bash (optional) +source ~/.bashrc +``` + +> :pushpin: **Note:** the dependency `zed_msgs` is no longer installed as a submodule of this package; it is available through `apt` as a binary package with ROS 2 Humble. When working with ROS 2 Foxy, or other distributions, you can install it from the sources from the [zed-ros2-interfaces repository](https://github.com/stereolabs/zed-ros2-interfaces?tab=readme-ov-file#install-the-package-from-the-source-code). + +> :pushpin: **Note:** If `rosdep` is missing, you can install it with: +> +>`sudo apt-get install python3-rosdep python3-rosinstall-generator python3-vcstool python3-rosinstall build-essential` + +> :pushpin: **Note:** The `zed_debug` package is intended for internal development only. If you don’t need it, you can skip building this package by adding `--packages-skip zed_debug` to your `colcon` command. 
+ +> :pushpin: **Note:** When using the ZED ROS 2 Wrapper on an NVIDIA Jetson with JP6, you may get the following error when building the package for the first time +> +> ```bash +> CMake Error at /usr/share/cmake-3.22/Modules/FindCUDA.cmake:859 (message): +> Specify CUDA_TOOLKIT_ROOT_DIR +> Call Stack (most recent call first): +> /usr/local/zed/zed-config.cmake:72 (find_package) +> CMakeLists.txt:81 (find_package) +> ``` +> +> You can fix the problem by installing the missing `nvidia-jetpack` packages: +> +> `sudo apt install nvidia-jetpack nvidia-jetpack-dev` +> +> :pushpin: **Note:** The option `--symlink-install` is very important, it allows the use of symlinks instead of copying files to the ROS 2 folders during the installation, where possible. Each package in ROS 2 must be installed, and all the files used by the nodes must be copied into the installation folders. Using symlinks allows you to modify them in your workspace, reflecting the modification during the next executions without issuing a new `colcon build` command. This is true only for all the files that don't need to be compiled (Python scripts, configurations, etc.). +> +> :pushpin: **Note:** If you are using a different console interface like zsh, you have to change the `source` command as follows: `echo source $(pwd)/install/local_setup.zsh >> ~/.zshrc` and `source ~/.zshrc`. + +## Starting the ZED node + +> :pushpin: **Note:** we recommend reading [this ROS 2 tuning guide](https://www.stereolabs.com/docs/ros2/dds_and_network_tuning) to improve the ROS 2 experience with ZED. 
+ +To start the ZED node, open a bash terminal and use the [CLI](https://index.ros.org/doc/ros2/Tutorials/Introspection-with-command-line-tools/) command `ros2 launch`: + +```bash +ros2 launch zed_wrapper zed_camera.launch.py camera_model:= +``` + +Replace `` with the model of the camera that you are using: `'zed'`, `'zedm'`, `'zed2'`, `'zed2i'`, `'zedx'`, `'zedxm'`, `'zedxhdrmini'`, `'zedxhdr'`, `'zedxhdrmax'`, `'virtual'`,`'zedxonegs'`,`'zedxone4k'`,`'zedxonehdr'`. + +The `zed_camera.launch.py` is a Python launch script that automatically starts the ZED node using ["manual composition"](https://index.ros.org/doc/ros2/Tutorials/Composition/). The parameters for the indicated camera model are loaded from the relative "YAML files." +A Robot State Publisher node is started to publish the camera static links and joints loaded from the URDF model associated with the camera model. + +> :pushpin: **Note:** You can set your configurations by modifying the parameters in the files **common_stereo.yaml**, **zed.yaml** **zedm.yaml**, **zed2.yaml**, **zed2i.yaml**, **zedx.yaml**, **zedxm.yaml**, **common_mono.yaml**, **zedxonegs.yaml**, **zedxone4k.yaml**, **zedxonehdr.yaml** available in the folder `zed_wrapper/config`. + +You can get the list of all the available launch parameters by using the `-s` launch option: + +```bash +ros2 launch zed_wrapper zed_camera.launch.py -s +ros2 launch zed_display_rviz2 display_zed_cam.launch.py -s +``` + +For full descriptions of each parameter, follow the complete guide [here](https://www.stereolabs.com/docs/ros2/zed_node#configuration-parameters). + +### RViz visualization + +To start a pre-configured RViz environment and visualize the data of all ZED cameras, we provide in the [`zed-ros2-examples` repository](https://github.com/stereolabs/zed-ros2-examples/tree/master/zed_display_rviz2). You'll see more advanced examples and visualizations that demonstrate depth, point clouds, odometry, object detection, etc. 
+ +You can also quickly check that your depth data is correctly retrieved in RViz with `rviz2 -d ./zed_wrapper/config/rviz2/.rviz`. RViz subscribes to numerous ROS topics, which can potentially impact the performance of your application compared to when it runs without RViz. + +### Simulation mode + +> :pushpin: **Note:** This feature is incompatible with the ZED X One and the older first-generation ZED cameras. + +Launch a standalone ZED ROS 2 node with simulated ZED data as input by using the following command: + +```bash +ros2 launch zed_wrapper zed_camera.launch.py camera_model:=zedx sim_mode:=true +``` + +Launch options: + +- [Mandatory] `camera_model`: indicates the model of the simulated camera. It's required that this parameter match the model of the simulated camera. In most cases, it will be a ZED X, since the first versions of the simulation plugins that we released are simulating this type of device. +- [Mandatory] `sim_mode`: start the ZED node in simulation mode if `true`. +- [Optional] `use_sim_time`: force the node to wait for valid messages on the topic `/clock`, and so use the simulation clock as the time reference. +- [Optional] `sim_address`: set the address of the simulation server. The default is `127.0.0.1`, and it's valid if the node runs on the same machine as the simulator. +- [Optional] `sim_port`: set the port of the simulation server. It must match the value of the field `Streaming Port` of the properties of the `ZED camera streamer` Action Graph node. A different `Streaming Port` value for each camera is required in multi-camera simulations. + +You can also start a preconfigured instance of `rviz2` to visualize all the information available in the simulation by using the command: + +```bash +ros2 launch zed_display_rviz2 display_zed_cam.launch.py camera_model:=zedx sim_mode:=true +``` + +The `display_zed_cam.launch.py` launch file includes the `zed_camera.launch.py` launch file, hence it gets the same parameters. 
+ +Here's an example of `rviz2` running with the simulated information obtained by placing the ZED camera on a shelf of a simulated warehouse: + +![Sim RVIZ2](./images/sim_rviz.jpg) + +![Shelves](./images/zed_shelves.jpg) + +Supported simulation environments: + +- [NVIDIA Omniverse Isaac Sim](https://www.stereolabs.com/docs/isaac-sim/) + +## More features + +### Point Cloud Transport + +The ZED ROS 2 Wrapper supports [Point Cloud Transport](http://wiki.ros.org/point_cloud_transport) to publish point clouds using different compression methods. + +This feature is available only if the package `point_cloud_transport` is installed in your ROS 2 environment; otherwise, it will be disabled automatically. + +To install the packages, use the command: + +```bash +sudo apt install ros-$ROS_DISTRO-point-cloud-transport ros-$ROS_DISTRO-point-cloud-transport-plugins +``` + +:pushpin: **Note:** We removed the `point_cloud_transport` package as a required dependency of the ZED ROS 2 Wrapper to avoid forcing its installation in all the environments where the ZED ROS2 Wrapper is used. If you want to use this feature, you must install the package manually. + +### SVO recording + +[SVO recording](https://www.stereolabs.com/docs/video/recording/) can be started and stopped while the ZED node is running using the service `start_svo_recording` and the service `stop_svo_recording`. +[More information](https://www.stereolabs.com/docs/ros2/zed_node/#services) + +### Object Detection + +> :pushpin: **Note:** This feature is incompatible with the ZED X One and the older first-generation ZED cameras. + +Object Detection can be enabled *automatically* when the node starts by setting the parameter `object_detection/od_enabled` to `true` in the file `common_stereo.yaml`. +The Object Detection can be enabled/disabled *manually* by calling the services `enable_obj_det`. 
+ +You can find a detailed explanation of the Object Detection module in the [ZED ROS 2 documentation](https://www.stereolabs.com/docs/ros2/object-detection). + +### Custom Object Detection with YOLO-like ONNX model file + +> :pushpin: **Note:** This feature is incompatible with the ZED X One and the older first-generation ZED cameras. + +Object Detection inference can be performed using a **custom inference engine** in YOLO-like ONNX format. + +You can generate your ONNX model by using Ultralytics YOLO tools. + +Install Ultralytics YOLO tools: + +```bash +python -m pip install ultralytics +``` + +If you have already installed the `ultralytics` package, we recommend updating it to the latest version: + +```bash +pip install -U ultralytics +``` + +Export an ONNX file from a YOLO model (more info [here](https://docs.ultralytics.com/modes/export/)), for example: + +```bash +yolo export model=yolo11n.pt format=onnx simplify=True dynamic=False imgsz=640 +``` + +For a custom-trained YOLO model, the weight file can be changed, for example: + +```bash +yolo export model=yolov8l_custom_model.pt format=onnx simplify=True dynamic=False imgsz=512 +``` + +Please refer to the [Ultralytics documentation](https://github.com/ultralytics/ultralytics) for details. + +Modify the `common_stereo.yaml` parameters to match your configuration: + +- Set `object_detection.model` to `CUSTOM_YOLOLIKE_BOX_OBJECTS` + +Modify the `custom_object_detection.yaml` parameters to match your configuration. + +> :pushpin: **Note:** The first time the custom model is used, the ZED SDK optimizes it to get the best performance from the GPU installed on the host. Please wait for the optimization to complete. When using Docker, we recommend using a shared volume to store the optimized file on the host and perform the optimization only once. 
+ +Console log while optimization is running: + +```bash +[zed_wrapper-3] [INFO] [1729184874.634985183] [zed.zed_node]: === Starting Object Detection === +[zed_wrapper-3] [2024-10-17 17:07:55 UTC][ZED][INFO] Please wait while the AI model is being optimized for your graphics card +[zed_wrapper-3] This operation will be run only once and may take a few minutes +``` + +You can find a detailed explanation of the Custom Object Detection module in the [ZED ROS 2 documentation](https://www.stereolabs.com/docs/ros2/custom-object-detection). + +### Body Tracking + +> :pushpin: **Note:** This feature is incompatible with the ZED X One and the older first-generation ZED cameras. + +The Body Tracking can be enabled *automatically* when the node starts by setting the parameter `body_tracking/bt_enabled` to `true` in the file `common_stereo.yaml`. + +### Spatial Mapping + +> :pushpin: **Note:** This feature is incompatible with the ZED X One camera. + +The Spatial Mapping can be enabled automatically when the node starts setting the parameter `mapping/mapping_enabled` to `true` in the file `common_stereo.yaml`. +The Spatial Mapping can be enabled/disabled manually by calling the service `enable_mapping`. + +### GNSS fusion + +> :pushpin: **Note:** This feature is incompatible with the ZED X One camera. + +The ZED ROS 2 Wrapper can subscribe to a `NavSatFix` topic and fuse GNSS data information +with Positional Tracking information to obtain a precise robot localization referred to Earth coordinates. +To enable GNSS fusion, set the parameter `gnss_fusion.gnss_fusion_enabled` to `true`. +You must set the correct `gnss_frame` parameter when launching the node, e.g., `gnss_frame:='gnss_link'`. +The services `toLL` and `fromLL` can be used to convert Latitude/Longitude coordinates to robot `map` coordinates. + +### 2D mode + +> :pushpin: **Note:** This feature is incompatible with the ZED X One camera. 
+ +For robots moving on a planar surface, activating the "2D mode" (parameter `pos_tracking/two_d_mode` in `common_stereo.yaml`) is possible. +The value of the coordinate Z for odometry and pose will have a fixed value (parameter `pos_tracking/fixed_z_value` in `common_stereo.yaml`). +Roll, Pitch, and the relative velocities will be fixed to zero. + +## NVIDIA® Isaac ROS integration + +The ZED ROS 2 Wrapper is compatible with the [NVIDIA® Isaac ROS](https://nvidia-isaac-ros.github.io/) framework, which provides a set of tools and libraries for building robotics applications on NVIDIA® platforms. + +The ZED ROS 2 Wrapper leverages [NITROS](https://nvidia-isaac-ros.github.io/concepts/nitros/index.html) (NVIDIA® Isaac Transport for ROS) a technology to enable data streaming through NVIDIA-accelerated ROS graphs. + +![NITROS communication](./images/nitros-graph.gif) + +Read the full [Isaac ROS integration guide](https://docs.stereolabs.com/isaac-ros/) and learn how to set up your development environment to use the ZED ROS2 Wrapper with Isaac ROS and NITROS. + +## Examples and Tutorials + +Examples and tutorials are provided to better understand how to use the ZED wrapper and how to integrate it into the ROS 2 framework. 
+See the [`zed-ros2-examples` repository](https://github.com/stereolabs/zed-ros2-examples) + +### RVIZ2 visualization examples + +- Example launch files to start a preconfigured instance of RViz displaying all the ZED Wrapper node information: [zed_display_rviz2](https://github.com/stereolabs/zed-ros2-examples/tree/master/zed_display_rviz2) +- ROS 2 plugin for ZED2 to visualize the results of the Object Detection and Body Tracking modules (bounding boxes and skeletons): [rviz-plugin-zed-od](https://github.com/stereolabs/zed-ros2-examples/tree/master/rviz-plugin-zed-od) + +### Tutorials + +A series of tutorials is provided to better understand how to use the ZED nodes in the ROS 2 environment : + +- [Video subscribing](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_video_tutorial): `zed_video_tutorial` - In this tutorial, you will learn how to write a simple node that subscribes to messages of type `sensor_msgs/Image` to retrieve the Left and Right rectified images published by the ZED node. +- [Depth subscribing](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_depth_tutorial): `zed_depth_tutorial` - In this tutorial, you will learn how to write a simple node that subscribes to messages of type `sensor_msgs/Image` to retrieve the depth images published by the ZED node and to get the measured distance at the center of the image. +- [Pose/Odometry subscribing](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_pose_tutorial): `zed_pose_tutorial` - In this tutorial, you will learn how to write a simple node that subscribes to messages of type `geometry_msgs/PoseStamped` and `nav_msgs/Odometry` to retrieve the position and the odometry of the camera while moving in the world. 
+- [ROS 2 Composition + BGRA2BGR conversion](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_rgb_convert): `zed_rgb_convert` - In this tutorial, you will learn how to use the concept of "ROS 2 Composition" and "Intra Process Communication" to write a ROS 2 component that gets a 4 channel BGRA image as input and re-publishes it as 3 channels BGR image.
+- [ROS 2 Multi-Camera](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_multi_camera): `zed_multi_camera` - In this tutorial, you will learn how to use the provided launch file to start a multi-camera robot configuration.
+- [ROS 2 Multi-Camera + Intra Process Communication](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_ipc): `zed_ipc` - In this tutorial, you will learn how to use the provided launch file to start a multi-camera configuration, and load a new processing node in the same process to leverage Intra Process Communication with ROS 2 composition.
+- [Robot integration](https://github.com/stereolabs/zed-ros2-examples/tree/master/tutorials/zed_robot_integration): `zed_robot_integration` - In this tutorial, you will learn how to add one or more ZED cameras to a robot configuration.
+- [zed_depth_to_laserscan](https://github.com/stereolabs/zed-ros2-examples/tree/master/examples/zed_depth_to_laserscan): Convert ZED Depth maps into virtual Laser Scans using the `depth_image_proc` package.
+
+## Update the local repository
+
+To update the repository to the latest release, use the following command that will retrieve the latest commits of `zed-ros2-wrapper` and of all the submodules:
+
+```bash
+git checkout master # if you are not on the main branch
+git pull
+```
+
+Clean the cache of your colcon workspace before compiling with the `colcon build` command to be sure that everything will work as expected:
+
+```bash
+cd <colcon_workspace> # replace <colcon_workspace> with your workspace folder, for example ~/ros2_ws/
+rm -r install
+rm -r build
+rm -r log
+colcon build --symlink-install --cmake-args=-DCMAKE_BUILD_TYPE=Release --parallel-workers $(nproc)
+```
+
+## Known issues
+
+- Nothing to report yet.
+
+If you find a bug or want to request a new feature, please open an issue on the [GitHub issues page](https://github.com/stereolabs/zed-ros2-wrapper/issues). 
diff --git a/src/lib/zed-ros2-wrapper/docker/Dockerfile.desktop-humble b/src/lib/zed-ros2-wrapper/docker/Dockerfile.desktop-humble new file mode 100644 index 000000000..cf584a445 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/Dockerfile.desktop-humble @@ -0,0 +1,134 @@ +ARG UBUNTU_MAJOR=22 +ARG UBUNTU_MINOR=04 +ARG CUDA_MAJOR=12 +ARG CUDA_MINOR=6 +ARG CUDA_PATCH=3 +ARG ZED_SDK_MAJOR=4 +ARG ZED_SDK_MINOR=2 +ARG ZED_SDK_PATCH=5 + +ARG IMAGE_NAME=nvcr.io/nvidia/cuda:${CUDA_MAJOR}.${CUDA_MINOR}.${CUDA_PATCH}-devel-ubuntu${UBUNTU_MAJOR}.${UBUNTU_MINOR} + +FROM ${IMAGE_NAME} + +ARG UBUNTU_MAJOR=22 +ARG UBUNTU_MINOR=04 +ARG CUDA_MAJOR=12 +ARG CUDA_MINOR=6 +ARG CUDA_PATCH=3 +ARG ZED_SDK_MAJOR=4 +ARG ZED_SDK_MINOR=2 +ARG ZED_SDK_PATCH=3 +# Optional: Override ZED SDK URL +ARG CUSTOM_ZED_SDK_URL="" + +ARG ROS2_DIST=humble # ROS 2 distribution + +ARG DEBIAN_FRONTEND=noninteractive + +ENV NVIDIA_DRIVER_CAPABILITIES \ + ${NVIDIA_DRIVER_CAPABILITIES:+$NVIDIA_DRIVER_CAPABILITIES,}compute,video,utility + +# Disable apt-get warnings +RUN apt-get update || true && apt-get install -y --no-install-recommends apt-utils dialog curl && \ + rm -rf /var/lib/apt/lists/* + +ENV ZED_SDK_URL=${CUSTOM_ZED_SDK_URL:-"https://download.stereolabs.com/zedsdk/${ZED_SDK_MAJOR}.${ZED_SDK_MINOR}.${ZED_SDK_PATCH}/cu${CUDA_MAJOR}/ubuntu${UBUNTU_MAJOR}"} + +# Check that this SDK exists +RUN echo "SDK link: $ZED_SDK_URL" +RUN if [ "$(curl -L -I "${ZED_SDK_URL}" -o /dev/null -s -w '%{http_code}\n' | head -n 1)" = "200" ]; then \ + echo "The URL points to something."; \ + else \ + echo "The URL does not point to a .run file or the file does not exist."; \ + exit 1; \ + fi + + +ENV TZ=Europe/Paris + +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \ + apt-get update && \ + apt-get install --yes lsb-release wget less udev sudo build-essential cmake python3 python3-dev python3-pip python3-wheel git jq libopencv-dev libpq-dev zstd usbutils && \ + rm -rf /var/lib/apt/lists/* + 
+############ Install ROS2 ############ + +# Set and Check Locale +RUN apt-get update || true && \ + apt-get install --no-install-recommends -y locales && \ + locale-gen en_US en_US.UTF-8 && \ + update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 && \ + export LANG=en_US.UTF-8 && \ + locale # verify settings && \ + rm -rf /var/lib/apt/lists/* + +# Setup Sources +RUN apt-get update || true && \ + apt-get install --no-install-recommends -y software-properties-common && \ + add-apt-repository universe && \ + apt-get install -y curl && \ + curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros-archive-keyring.gpg] http://packages.ros.org/ros2/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | tee /etc/apt/sources.list.d/ros2.list > /dev/null && \ + rm -rf /var/lib/apt/lists/* + +# Install ROS 2 Base packages and Python dependencies +RUN apt-get update || true && \ + apt-get install --no-install-recommends -y \ + ros-${ROS2_DIST}-ros-base \ + python3-flake8-docstrings \ + python3-pip \ + python3-pytest-cov \ + ros-dev-tools && \ + pip3 install \ + argcomplete \ + numpy \ + empy \ + lark && \ + rm -rf /var/lib/apt/lists/* + +# Initialize rosdep +RUN rosdep init && rosdep update + +# Install the ZED SDK +RUN echo "CUDA Version $CUDA_VERSION" > /usr/local/cuda/version.txt + +# Setup the ZED SDK +RUN apt-get update -y || true && \ + apt-get install --no-install-recommends dialog bash-completion lsb-release wget less udev sudo build-essential cmake zstd python3 python3-pip libpng-dev libgomp1 -y && \ + python3 -m pip install --upgrade pip; python3 -m pip install numpy opencv-python-headless && \ + wget -q -O ZED_SDK_Linux_Ubuntu.run ${ZED_SDK_URL} && \ + chmod +x ZED_SDK_Linux_Ubuntu.run && \ + ./ZED_SDK_Linux_Ubuntu.run -- silent skip_tools skip_cuda && \ + ln -sf /lib/x86_64-linux-gnu/libusb-1.0.so.0 
/usr/lib/x86_64-linux-gnu/libusb-1.0.so && \ + rm ZED_SDK_Linux_Ubuntu.run && \ + rm -rf /var/lib/apt/lists/* + +# Install the ZED ROS2 Wrapper +ENV ROS_DISTRO ${ROS2_DIST} + +# Copy the sources in the Docker image +WORKDIR /root/ros2_ws/src +COPY tmp_sources/ ./ + +# RUN ls -lah /root/ros2_ws/src/ +WORKDIR /root/ros2_ws/ + +RUN /bin/bash -c "source /opt/ros/$ROS_DISTRO/setup.bash && \ + apt-get update -y || true && rosdep update && \ + rosdep install --from-paths src --ignore-src -r -y && \ + colcon build --parallel-workers $(nproc) --symlink-install \ + --event-handlers console_direct+ --base-paths src \ + --cmake-args ' -DCMAKE_BUILD_TYPE=Release' \ + ' -DCMAKE_LIBRARY_PATH=/usr/local/cuda/lib64/stubs' \ + ' -DCMAKE_CXX_FLAGS="-Wl,--allow-shlib-undefined"' " && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /root/ros2_ws + +# Setup environment variables +COPY ros_entrypoint.sh /sbin/ros_entrypoint.sh +RUN sudo chmod 755 /sbin/ros_entrypoint.sh + +ENTRYPOINT ["/sbin/ros_entrypoint.sh"] +CMD ["bash"] diff --git a/src/lib/zed-ros2-wrapper/docker/Dockerfile.l4t-humble b/src/lib/zed-ros2-wrapper/docker/Dockerfile.l4t-humble new file mode 100644 index 000000000..f8016742e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/Dockerfile.l4t-humble @@ -0,0 +1,118 @@ +# Define the L4T_VERSION argument +ARG L4T_VERSION=l4t-r36.3.0 +ARG IMAGE_NAME=dustynv/ros:humble-ros-base-l4t-r36.3.0 + +FROM ${IMAGE_NAME} + +ARG ZED_SDK_MAJOR=4 +ARG ZED_SDK_MINOR=2 +ARG ZED_SDK_PATCH=3 +ARG L4T_MAJOR=36 +ARG L4T_MINOR=3 +# Optional: Override ZED SDK URL +ARG CUSTOM_ZED_SDK_URL="" + +# ROS 2 distribution +ARG ROS2_DIST=humble + +# ZED ROS2 Wrapper dependencies version +ARG XACRO_VERSION=2.0.8 +ARG DIAGNOSTICS_VERSION=4.0.0 +ARG AMENT_LINT_VERSION=0.12.11 +ARG ROBOT_LOCALIZATION_VERSION=3.5.3 +ARG ZED_MSGS_VERSION=5.0.0 +ARG NMEA_MSGS_VERSION=2.0.0 +ARG ANGLES_VERSION=1.15.0 +ARG GEOGRAPHIC_INFO_VERSION=1.0.6 +ARG POINTCLOUD_TRANSPORT_VERSION=1.0.18 +ARG 
POINTCLOUD_TRANSPORT_PLUGINS_VERSION=1.0.11 +ARG RMW_CYCLONEDDS_VERSION=1.3.4 +ARG BACKWARD_ROS_VERSION=1.0.7 +ENV PYPI_URL=https://pypi.jetson-ai-lab.io/jp6/cu126 + + +ENV DEBIAN_FRONTEND=noninteractive + +# ZED SDK link +ENV ZED_SDK_URL=${CUSTOM_ZED_SDK_URL:-"https://download.stereolabs.com/zedsdk/${ZED_SDK_MAJOR}.${ZED_SDK_MINOR}.${ZED_SDK_PATCH}/l4t${L4T_MAJOR}.${L4T_MINOR}/jetsons"} + +# Check that this SDK exists +RUN if [ "$(curl -L -I "${ZED_SDK_URL}" -o /dev/null -s -w '%{http_code}\n' | head -n 1)" = "200" ]; then \ + echo "The URL points to something."; \ + else \ + echo "The URL does not point to a .run file or the file does not exist."; \ + exit 1; \ + fi + +# Disable apt-get warnings +RUN curl -fsSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros-archive-keyring.gpg && \ + apt-get update || true && apt-get install -y --no-install-recommends apt-utils dialog && \ + rm -rf /var/lib/apt/lists/* + +ENV TZ=Europe/Paris + +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && \ + apt-get update && \ + apt-get install --yes lsb-release wget less udev sudo build-essential cmake python3 python3-dev python3-pip python3-wheel git jq libpq-dev zstd usbutils && \ + rm -rf /var/lib/apt/lists/* + +RUN echo "# R${L4T_MAJOR} (release), REVISION: ${L4T_MINOR}" > /etc/nv_tegra_release && \ + apt-get update -y || true && \ + apt-get install -y --no-install-recommends zstd wget less cmake curl gnupg2 \ + build-essential python3 python3-pip python3-dev python3-setuptools libusb-1.0-0-dev \ + libgeographic-dev libdraco-dev zlib1g-dev -y && \ + export PIP_INDEX_URL="${PYPI_URL}" && \ + pip install protobuf && \ + wget -q --no-check-certificate -O ZED_SDK_Linux_JP.run \ + ${ZED_SDK_URL} && \ + chmod +x ZED_SDK_Linux_JP.run ; ./ZED_SDK_Linux_JP.run silent skip_tools && \ + rm -rf /usr/local/zed/resources/* && \ + rm -rf ZED_SDK_Linux_JP.run && \ + rm -rf /var/lib/apt/lists/* + +# Install the ZED ROS2 Wrapper 
+ENV ROS_DISTRO=${ROS2_DIST} + +# Copy the sources in the Docker image +WORKDIR /root/ros2_ws/src +COPY tmp_sources/ ./ + +# Install missing dependencies from the sources +WORKDIR /root/ros2_ws/src +RUN wget https://github.com/ros/xacro/archive/refs/tags/${XACRO_VERSION}.tar.gz -O - | tar -xvz && mv xacro-${XACRO_VERSION} xacro && \ + wget https://github.com/ros/diagnostics/archive/refs/tags/${DIAGNOSTICS_VERSION}.tar.gz -O - | tar -xvz && mv diagnostics-${DIAGNOSTICS_VERSION} diagnostics && \ + wget https://github.com/ament/ament_lint/archive/refs/tags/${AMENT_LINT_VERSION}.tar.gz -O - | tar -xvz && mv ament_lint-${AMENT_LINT_VERSION} ament-lint && \ + wget https://github.com/cra-ros-pkg/robot_localization/archive/refs/tags/${ROBOT_LOCALIZATION_VERSION}.tar.gz -O - | tar -xvz && mv robot_localization-${ROBOT_LOCALIZATION_VERSION} robot-localization && \ + wget https://github.com/stereolabs/zed-ros2-interfaces/archive/refs/tags/${ZED_MSGS_VERSION}.tar.gz -O - | tar -xvz && mv zed-ros2-interfaces-${ZED_MSGS_VERSION} zed-ros2-interfaces && \ + wget https://github.com/ros-drivers/nmea_msgs/archive/refs/tags/${NMEA_MSGS_VERSION}.tar.gz -O - | tar -xvz && mv nmea_msgs-${NMEA_MSGS_VERSION} nmea_msgs && \ + wget https://github.com/ros/angles/archive/refs/tags/${ANGLES_VERSION}.tar.gz -O - | tar -xvz && mv angles-${ANGLES_VERSION} angles && \ + #wget https://github.com/ros-perception/point_cloud_transport/archive/refs/tags/${POINTCLOUD_TRANSPORT_VERSION}.tar.gz -O - | tar -xvz && mv point_cloud_transport-${POINTCLOUD_TRANSPORT_VERSION} point_cloud_transport && \ + #wget https://github.com/ros-perception/point_cloud_transport_plugins/archive/refs/tags/${POINTCLOUD_TRANSPORT_PLUGINS_VERSION}.tar.gz -O - | tar -xvz && mv point_cloud_transport_plugins-${POINTCLOUD_TRANSPORT_PLUGINS_VERSION} point_cloud_transport_plugins && \ + wget https://github.com/ros2/rmw_cyclonedds/archive/refs/tags/${RMW_CYCLONEDDS_VERSION}.tar.gz -O - | tar -xvz && mv 
rmw_cyclonedds-${RMW_CYCLONEDDS_VERSION} rmw_cyclonedds && \ + wget https://github.com/ros-geographic-info/geographic_info/archive/refs/tags/${GEOGRAPHIC_INFO_VERSION}.tar.gz -O - | tar -xvz && mv geographic_info-${GEOGRAPHIC_INFO_VERSION} geographic-info && \ + wget https://github.com/pal-robotics/backward_ros/archive/refs/tags/${BACKWARD_ROS_VERSION}.tar.gz -O - | tar -xvz && mv backward_ros-${BACKWARD_ROS_VERSION} backward_ros && \ + cp -r geographic-info/geographic_msgs/ . && \ + rm -rf geographic-info + +# Install cython +RUN python3 -m pip install --upgrade cython + +# Build the dependencies and the ZED ROS2 Wrapper +WORKDIR /root/ros2_ws +RUN /bin/bash -c "source /opt/ros/$ROS_DISTRO/install/setup.bash && \ + colcon build --parallel-workers $(nproc) --symlink-install \ + --event-handlers console_direct+ --base-paths src \ + --cmake-args ' -DCMAKE_BUILD_TYPE=Release' \ + ' -DCMAKE_LIBRARY_PATH=/usr/local/cuda/lib64/stubs' \ + ' -DCMAKE_CXX_FLAGS="-Wl,--allow-shlib-undefined"' \ + ' --no-warn-unused-cli' " + +WORKDIR /root/ros2_ws + +# Setup environment variables +COPY ros_entrypoint_jetson.sh /sbin/ros_entrypoint.sh +RUN sudo chmod 755 /sbin/ros_entrypoint.sh + +ENTRYPOINT ["/sbin/ros_entrypoint.sh"] +CMD ["bash"] + diff --git a/src/lib/zed-ros2-wrapper/docker/README.md b/src/lib/zed-ros2-wrapper/docker/README.md new file mode 100644 index 000000000..145c915c5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/README.md @@ -0,0 +1,149 @@ +# Docker + +This folder contains a list of Dockerfile files to build Docker images ready to start the nodes of the *ZED ROS2 Wrapper*: + +* `Dockerfile.desktop-humble`: development desktop image for ROS 2 Humble, running on the specified Ubuntu and CUDA versions. The ZED Wrapper is copied from the source file of the current branch and compiled. +* `Dockerfile.l4t-humble`: Jetson image for ROS 2 Humble, running on the given L4T version (L4T35.4 by default). 
+ +> :pushpin: **NOTE:** in the entrypoint files we set the value of the `ROS_DOMAIN_ID` environment +> variable to `0`, which is the default value in ROS 2. +> +> If your setup requires a different value you can change it in the `ros_entrypoint_jetson.sh` and +> `ros_entrypoint.sh` files before building your image to set it automatically when starting your Docker image, +> or you can use the CLI command `export ROS_DOMAIN_ID=<your_domain_id>` when each interactive session is started. +> +> You can get more details concerning the `ROS_DOMAIN_ID` usage on the [official ROS 2 documentation](https://docs.ros.org/en/humble/Concepts/Intermediate/About-Domain-ID.html#the-ros-domain-id). + +## Cross compilation + +You can easily compile the image for Jetson from your usual Desktop PC. +For that you just need to run the following line before launching the build command: + +```bash +docker run --rm --privileged multiarch/qemu-user-static --reset -p yes +``` + +## Build the Docker images + +We provide a script to build your image with the right L4T / ZED SDK version. + +* Checkout the tag or commit of the ROS2 wrapper that you need. + + ```bash + git checkout <tag_or_commit> + ``` + + e.g. to build the master branch: + + ```bash + git checkout master + ``` + +* Build the image for **Jetson**: + + ```bash + ./jetson_build_dockerfile_from_sdk_and_l4T_version.sh <l4t_version> <zed_sdk_version> + ``` + +* Build the image for **Desktop**: + + ```bash + ./desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh <ubuntu_version> <cuda_version> <zed_sdk_version> + ``` + +Examples: + +```bash +# Jetson with JP6.0 and ZED SDK v4.2.5 +./jetson_build_dockerfile_from_sdk_and_l4T_version.sh l4t-r36.3.0 zedsdk-4.2.5 +``` + +```bash +# Jetson with JP6.2 and ZED SDK v5.0.0 +./jetson_build_dockerfile_from_sdk_and_l4T_version.sh l4t-r36.4.0 zedsdk-5.0.0 +``` + +```bash +# Desktop on Ubuntu 22.04, CUDA 12.6.3 and ZED SDK v4.2.5 +./desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh ubuntu-22.04 cuda-12.6.3 zedsdk-4.2.5 +``` + +> :warning: Some configurations will not work. 
For example, if a specific ZED SDK does not exist for a given Ubuntu/CUDA/L4T version, or if the given ROS 2 wrapper is not compatible with the selected Ubuntu version. + +## Run the Docker image + +### NVIDIA runtime + +NVIDIA drivers must be accessible from the Docker image to run the ZED SDK code on the GPU. You'll need: + +* The `nvidia` container runtime installed, following [this guide](https://www.stereolabs.com/docs/docker/install-guide-linux/#nvidia-docker) +* A specific docker runtime environment with `--gpus all` or `-e NVIDIA_DRIVER_CAPABILITIES=all` +* Docker privileged mode with `--privileged` + +### Network + +Setup the network configuration to enable the communication between the Docker image, other Docker images, and the host: + +* `--network=host`: Remove network isolation between the container and the Docker host +* `--ipc=host`: Use the host system's Inter-Process Communication namespace +* `--pid=host`: Use the host system's namespace for process ID + +### Display context to use CUDA based applications + +Use the same host `DISPLAY` environment variable in every Docker image to enable CUDA-based applications with `-e DISPLAY=$DISPLAY`. + +> :pushpin: **NOTE**: the shared volume `/tmp/.X11-unix/:/tmp/.X11-unix` is also required. + +### Volumes + +A few volumes should also be shared with the host. + +* `/tmp/.X11-unix/:/tmp/.X11-unix` is required to enable X11 server communication for CUDA-based applications +* `/usr/local/zed/settings:/usr/local/zed/settings` if you plan to use the robot in an area without Internet access, and you previously downloaded the camera calibration files by following [this guide](https://support.stereolabs.com/hc/en-us/articles/21614848880791-How-can-I-use-the-ZED-with-Docker-on-a-robot-with-no-internet-connection). 
+* `/usr/local/zed/resources:/usr/local/zed/resources` if you plan to use the AI module of the ZED SDK (Object Detection, Skeleton Tracking, NEURAL depth) we suggest binding mounting a folder to avoid downloading and optimizing the AI models each time the Docker image is restarted. The first time you use the AI model inside the Docker image, it will be downloaded and optimized in the local bound-mounted folder, and stored there for the next runs. + * If you plan to use different SDK versions in different Docker images it's preferred to use a different + volume on the host for each of them: `//:/usr/local/zed/resources` +* `/dev:/dev` to share the video devices +* For GMSL2 cameras (ZED X, ZED X One) you'll also need + * `/tmp:/tmp` + * `/var/nvidia/nvcam/settings/:/var/nvidia/nvcam/settings/` + * `/etc/systemd/system/zed_x_daemon.service:/etc/systemd/system/zed_x_daemon.service` +* `/dev:/dev`: to share the video and other required devices + * `/dev/shm:/dev/shm`: to use ROS 2 with shared memory + +### Start the Docker container + +First of all, allow the container to access EGL display resources (required only once): + +```bash +sudo xhost +si:localuser:root +``` + +then you can start an interactive session: + +#### USB3 cameras + +```bash +docker run --runtime nvidia -it --privileged --network=host --ipc=host --pid=host \ + -e NVIDIA_DRIVER_CAPABILITIES=all -e DISPLAY=$DISPLAY \ + -v /tmp/.X11-unix/:/tmp/.X11-unix \ + -v /dev:/dev \ + -v /dev/shm:/dev/shm \ + -v /usr/local/zed/resources/:/usr/local/zed/resources/ \ + -v /usr/local/zed/settings/:/usr/local/zed/settings/ \ + +``` + +#### GMSL cameras + +```bash +docker run --runtime nvidia -it --privileged --network=host --ipc=host --pid=host \ + -e NVIDIA_DRIVER_CAPABILITIES=all -e DISPLAY=$DISPLAY \ + -v /tmp:/tmp \ + -v /dev:/dev \ + -v /var/nvidia/nvcam/settings/:/var/nvidia/nvcam/settings/ \ + -v /etc/systemd/system/zed_x_daemon.service:/etc/systemd/system/zed_x_daemon.service \ + -v 
/usr/local/zed/resources/:/usr/local/zed/resources/ \ + -v /usr/local/zed/settings/:/usr/local/zed/settings/ \ + +``` diff --git a/src/lib/zed-ros2-wrapper/docker/desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh b/src/lib/zed-ros2-wrapper/docker/desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh new file mode 100755 index 000000000..eeca6a8f7 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh @@ -0,0 +1,93 @@ +#!/bin/bash +cd $(dirname $0) + +if [ "$#" -lt 3 ]; then + echo "Give Ubuntu version then CUDA version then ZED SDK version as parameters, like this:" + echo "./desktop_build_dockerfile_from_sdk_ubuntu_and_cuda_version.sh ubuntu-22.04 cuda-12.6.3 zedsdk-4.2.3" + exit 1 +fi + +# Ubuntu version +# Verify the format (ubuntu-digits.digits) +if ! [[ $1 =~ ^ubuntu-[0-9]+\.[0-9]+$ ]]; then + echo "Invalid Ubuntu version format." + exit 1 +fi + +ubuntu_version=$1 +ubuntu_version_number="${ubuntu_version#ubuntu-}" + +# Split the string and assign to variables +IFS='.' read -r ubuntu_major ubuntu_minor <<< "$ubuntu_version_number" +echo "Ubuntu $ubuntu_major.$ubuntu_minor detected." + +# CUDA version +# Verify the format (cuda-digits.digits.digits) +if ! [[ $2 =~ ^cuda-[0-9]+\.[0-9]\.[0-9]$ ]]; then + echo "Invalid CUDA version format." + exit 1 +fi + +cuda_version=$2 +cuda_version_number="${cuda_version#cuda-}" + +# Split the string and assign to variables +IFS='.' read -r cuda_major cuda_minor cuda_patch <<< "$cuda_version_number" +echo "CUDA $cuda_major.$cuda_minor.$cuda_patch detected." 
+ + +ZED_SDK_version=$3 + +# copy the wrapper content +rm -r ./tmp_sources +mkdir -p ./tmp_sources +cp -r ../zed* ./tmp_sources + +# Check if the third arg is a custom path +CUSTOM_ZED_SDK_URL=$3 +# Use curl to check if the URL is valid (returns HTTP 200) +if [ "$(curl -L -I "${CUSTOM_ZED_SDK_URL}" -o /dev/null -s -w '%{http_code}\n' | head -n 1)" = "200" ]; then + echo "${ZED_SDK_version} detected as a valid custom installer URL" + + echo "Building dockerfile for $1, CUDA $2 and a custom ZED SDK" + + docker build -t zed_ros2_desktop_u${ubuntu_major}.${ubuntu_minor}_sdk_custom_cuda_${cuda_major}.${cuda_minor}.${cuda_patch} \ + --build-arg CUSTOM_ZED_SDK_URL=$CUSTOM_ZED_SDK_URL \ + --build-arg UBUNTU_MAJOR=$ubuntu_major \ + --build-arg UBUNTU_MINOR=$ubuntu_minor \ + --build-arg CUDA_MAJOR=$cuda_major \ + --build-arg CUDA_MINOR=$cuda_minor \ + --build-arg CUDA_PATCH=$cuda_patch \ + -f ./Dockerfile.desktop-humble . +else + # Verify the ZED SDK format (digits.digits.digits) + if ! [[ $3 =~ ^zedsdk-[0-9]\.[0-9]\.[0-9]$ ]]; then + echo "Invalid ZED SDK version format." + exit 1 + fi + + # Remove the prefix 'zedsdk-' + zed_sdk_version_number="${ZED_SDK_version#zedsdk-}" + + # Split the string and assign to variables + IFS='.' read -r sdk_major sdk_minor sdk_patch <<< "$zed_sdk_version_number" + echo "ZED SDK $sdk_major.$sdk_minor.$sdk_patch detected." + + echo "Building dockerfile for $1, CUDA $2 and ZED SDK $3" + + docker build -t zed_ros2_desktop_u${ubuntu_major}.${ubuntu_minor}_sdk_${sdk_major}.${sdk_minor}.${sdk_patch}_cuda_${cuda_major}.${cuda_minor}.${cuda_patch} \ + --build-arg ZED_SDK_MAJOR=$sdk_major \ + --build-arg ZED_SDK_MINOR=$sdk_minor \ + --build-arg ZED_SDK_PATCH=$sdk_patch \ + --build-arg UBUNTU_MAJOR=$ubuntu_major \ + --build-arg UBUNTU_MINOR=$ubuntu_minor \ + --build-arg CUDA_MAJOR=$cuda_major \ + --build-arg CUDA_MINOR=$cuda_minor \ + --build-arg CUDA_PATCH=$cuda_patch \ + -f ./Dockerfile.desktop-humble . 
+fi + +########### + +# Remove the temporary folder +rm -r ./tmp_sources \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/docker/jetson_build_dockerfile_from_sdk_and_l4T_version.sh b/src/lib/zed-ros2-wrapper/docker/jetson_build_dockerfile_from_sdk_and_l4T_version.sh new file mode 100755 index 000000000..f1560b7b6 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/jetson_build_dockerfile_from_sdk_and_l4T_version.sh @@ -0,0 +1,85 @@ +#!/bin/bash +cd $(dirname $0) + +if [ "$#" -lt 2 ]; then + echo "Please enter a valid L4T version and ZED SDK version as parameters. For example:" + echo "./jetson_build_dockerfile_from_sdk_and_l4T_version.sh l4t-r36.3.0 zedsdk-4.2.5" + exit 1 +fi + +# L4T version +# Verify the format (l4t-r digits.digits.digits) +if ! [[ $1 =~ ^l4t-r[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "Invalid L4T version format." + exit 1 +fi + +L4T_version=$1 +# Remove the prefix 'l4t-r' +l4t_version_number="${L4T_version#l4t-r}" + + +ZED_SDK_version=$2 + + +# copy the wrapper content +rm -r ./tmp_sources +mkdir -p ./tmp_sources +cp -r ../zed* ./tmp_sources + +# Split the string and assign to variables +IFS='.' 
read -r l4t_major l4t_minor l4t_patch <<< "$l4t_version_number" +########### + +L4T_VERSION=${1:-l4t-r${l4t_major}.${l4t_minor}.${l4t_patch}} + +# Determine the IMAGE_NAME based on the L4T_VERSION +if [[ "$L4T_VERSION" == *"l4t-r36.4"* ]]; then + IMAGE_NAME="dustynv/ros:humble-desktop-${L4T_VERSION}" +else + IMAGE_NAME="dustynv/ros:humble-ros-base-${L4T_VERSION}" +fi + +# Check if the third arg is a custom path +CUSTOM_ZED_SDK_URL=$2 +# Use curl to check if the URL is valid (returns HTTP 200) +if [ "$(curl -L -I "${CUSTOM_ZED_SDK_URL}" -o /dev/null -s -w '%{http_code}\n' | head -n 1)" = "200" ]; then + echo "${ZED_SDK_version} detected as a valid custom installer URL" + + echo "Building dockerfile for $1 and a custom ZED SDK" + + docker build -t zed_ros2_l4t_${l4t_major}.${l4t_minor}.${l4t_patch}_sdk_custom \ + --build-arg CUSTOM_ZED_SDK_URL=$CUSTOM_ZED_SDK_URL \ + --build-arg L4T_VERSION=$1 \ + --build-arg L4T_MAJOR=$l4t_major \ + --build-arg L4T_MINOR=$l4t_minor \ + --build-arg IMAGE_NAME=$IMAGE_NAME \ + -f ./Dockerfile.l4t-humble . +else + # Verify the ZED SDK format (digits.digits.digits) + if ! [[ $2 =~ ^zedsdk-[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "Invalid ZED SDK version format." + exit 1 + fi + + # Remove the prefix 'zedsdk-' + zed_sdk_version_number="${ZED_SDK_version#zedsdk-}" + + # Split the string and assign to variables + IFS='.' read -r sdk_major sdk_minor sdk_patch <<< "$zed_sdk_version_number" + + echo "Building dockerfile for $1 and ZED SDK $2" + + docker build -t zed_ros2_l4t_${l4t_major}.${l4t_minor}.${l4t_patch}_sdk_${sdk_major}.${sdk_minor}.${sdk_patch} \ + --build-arg ZED_SDK_MAJOR=$sdk_major \ + --build-arg ZED_SDK_MINOR=$sdk_minor \ + --build-arg ZED_SDK_PATCH=$sdk_patch \ + --build-arg L4T_VERSION=$1 \ + --build-arg L4T_MAJOR=$l4t_major \ + --build-arg L4T_MINOR=$l4t_minor \ + --build-arg IMAGE_NAME=$IMAGE_NAME \ + -f ./Dockerfile.l4t-humble . 
+fi + +# Remove the temporary folder +rm -r ./tmp_sources \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/docker/ros_entrypoint.sh b/src/lib/zed-ros2-wrapper/docker/ros_entrypoint.sh new file mode 100644 index 000000000..09c0b2b51 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/ros_entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e + +# setup ros2 environment +source "/opt/ros/$ROS_DISTRO/setup.bash" -- +source "/root/ros2_ws/install/local_setup.bash" -- + +export ROS_DOMAIN_ID=0 + +# Welcome information +echo "ZED ROS2 Docker Image" +echo "---------------------" +echo 'ROS distro: ' $ROS_DISTRO +echo 'DDS middleware: ' $RMW_IMPLEMENTATION +echo 'ROS 2 Workspaces:' $COLCON_PREFIX_PATH +echo 'ROS 2 Domain ID:' $ROS_DOMAIN_ID +echo ' * Note: Host and Docker image Domain ID must match to allow communication' +echo 'Local IPs:' $(hostname -I) +echo "---" +echo 'Available ZED packages:' +ros2 pkg list | grep zed +echo "---------------------" +echo 'To start a ZED camera node:' +echo ' ros2 launch zed_wrapper zed_camera.launch.py camera_model:=' +echo "---------------------" +exec "$@" diff --git a/src/lib/zed-ros2-wrapper/docker/ros_entrypoint_jetson.sh b/src/lib/zed-ros2-wrapper/docker/ros_entrypoint_jetson.sh new file mode 100644 index 000000000..2899d1e9d --- /dev/null +++ b/src/lib/zed-ros2-wrapper/docker/ros_entrypoint_jetson.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e + +# setup ros2 environment +source "/opt/ros/$ROS_DISTRO/install/setup.bash" +source "/root/ros2_ws/install/local_setup.bash" + +export ROS_DOMAIN_ID=0 + +# Welcome information +echo "ZED ROS2 Docker Image" +echo "---------------------" +echo 'ROS distro: ' $ROS_DISTRO +echo 'DDS middleware: ' $RMW_IMPLEMENTATION +echo 'ROS 2 Workspaces:' $COLCON_PREFIX_PATH +echo 'ROS 2 Domain ID:' $ROS_DOMAIN_ID +echo ' * Note: Host and Docker image Domain ID must match to allow communication' +echo 'Local IPs:' $(hostname -I) +echo "---" +echo 'Available ZED packages:' +ros2 pkg list | grep 
zed +echo "---------------------" +echo 'To start a ZED camera node:' +echo ' ros2 launch zed_wrapper zed_camera.launch.py camera_model:=' +echo "---------------------" +exec "$@" diff --git a/src/lib/zed-ros2-wrapper/images/.gitkeep b/src/lib/zed-ros2-wrapper/images/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/src/lib/zed-ros2-wrapper/images/Picto+STEREOLABS_Black.jpg b/src/lib/zed-ros2-wrapper/images/Picto+STEREOLABS_Black.jpg new file mode 100644 index 000000000..060e24357 Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/Picto+STEREOLABS_Black.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/PointCloud_Depth_ROS.jpg b/src/lib/zed-ros2-wrapper/images/PointCloud_Depth_ROS.jpg new file mode 100644 index 000000000..467a3dedd Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/PointCloud_Depth_ROS.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/depth.jpg b/src/lib/zed-ros2-wrapper/images/depth.jpg new file mode 100644 index 000000000..542ed9d03 Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/depth.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/nitros-graph.gif b/src/lib/zed-ros2-wrapper/images/nitros-graph.gif new file mode 100644 index 000000000..bd7e15cc6 Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/nitros-graph.gif differ diff --git a/src/lib/zed-ros2-wrapper/images/point_cloud.jpg b/src/lib/zed-ros2-wrapper/images/point_cloud.jpg new file mode 100644 index 000000000..63fdd258d Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/point_cloud.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/rgb.jpg b/src/lib/zed-ros2-wrapper/images/rgb.jpg new file mode 100644 index 000000000..58de75ab8 Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/rgb.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/sim_rviz.jpg b/src/lib/zed-ros2-wrapper/images/sim_rviz.jpg new file mode 100644 index 000000000..cbd120e14 Binary files /dev/null and 
b/src/lib/zed-ros2-wrapper/images/sim_rviz.jpg differ diff --git a/src/lib/zed-ros2-wrapper/images/zed_shelves.jpg b/src/lib/zed-ros2-wrapper/images/zed_shelves.jpg new file mode 100644 index 000000000..bcb7bbda1 Binary files /dev/null and b/src/lib/zed-ros2-wrapper/images/zed_shelves.jpg differ diff --git a/src/lib/zed-ros2-wrapper/zed_components/CMakeLists.txt b/src/lib/zed-ros2-wrapper/zed_components/CMakeLists.txt new file mode 100644 index 000000000..df6b8fa2e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/CMakeLists.txt @@ -0,0 +1,332 @@ +cmake_minimum_required(VERSION 3.8) +project(zed_components) + +################################################ +## Generate symbols for IDE indexer (VSCode) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +################################################ +# Check the ROS2 version + +set(ROS2_FOUND FALSE) +if(DEFINED ENV{ROS_DISTRO}) + set(FOUND_ROS2_DISTRO $ENV{ROS_DISTRO}) + set(ROS2_FOUND TRUE) + #message("* Found ROS2 ${FOUND_ROS2_DISTRO}") +else() + message("* ROS2 distro variable not set. 
Trying to figure it out...") + set(ROS2_DISTROS "ardent;bouncy;crystal;dashing;eloquent;foxy;galactic;humble;iron;jazzy;kilted;rolling") + set(ROS2_FOUND FALSE) + foreach(distro ${ROS2_DISTROS}) + if(NOT ROS2_FOUND) + find_path(RCLCPP_H rclcpp.hpp PATHS /opt/ros/${distro}/include/rclcpp) + if(RCLCPP_H) + #message("* Found ROS2 ${distro}") + set(FOUND_ROS2_DISTRO ${distro}) + set(ROS2_FOUND TRUE) + endif() + endif() + endforeach() +endif() + +if(ROS2_FOUND) + if(${FOUND_ROS2_DISTRO} STREQUAL "foxy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_FOXY) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "humble") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_HUMBLE) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "iron") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_IRON) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "jazzy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_JAZZY) + else() + message("*** WARNING *** Unsupported ROS2 ${FOUND_ROS2_DISTRO}. '${PROJECT_NAME}' may not work correctly.") + endif() +else() + message("*** WARNING *** ROS2 distro is unknown. 
This package could not work correctly.") +endif() +################################################ + +# Default to C99 +if(NOT CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99) +endif() + +# Default to C++17 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +# if CMAKE_BUILD_TYPE is not specified, take 'Release' as default +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +if(CMAKE_BUILD_TYPE MATCHES Release) + #message(" * Release Mode") + add_compile_options(-Wno-deprecated-declarations) +endif() + +if(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) + #message(" * Release with Debug Info Mode") + add_compile_options(-Wno-deprecated-declarations) +endif() + +if(CMAKE_BUILD_TYPE MATCHES Debug) + message(" * Debug Mode") +endif() + +############################################# +# Dependencies +find_package(nlohmann_json REQUIRED) +find_package(ZED REQUIRED) + +exec_program(uname ARGS -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2) +if(CMAKE_SYSTEM_NAME2 MATCHES "aarch64") # Jetson TX + set(CUDA_USE_STATIC_CUDA_RUNTIME OFF) +endif() + +find_package(CUDA REQUIRED) + +set(DEPENDENCIES + rclcpp + rclcpp_components + image_transport + builtin_interfaces + std_msgs + rosgraph_msgs + tf2 + tf2_ros + tf2_geometry_msgs + geometry_msgs + nav_msgs + nmea_msgs + geographic_msgs + sensor_msgs + stereo_msgs + zed_msgs + std_srvs + diagnostic_msgs + diagnostic_updater + visualization_msgs + shape_msgs + robot_localization + backward_ros +) + +find_package(ament_cmake_auto REQUIRED) +ament_auto_find_build_dependencies() + +find_package(rcutils REQUIRED) +find_package(zed_msgs) +find_package(rclcpp REQUIRED) +find_package(rclcpp_components REQUIRED) +find_package(builtin_interfaces REQUIRED) +find_package(std_msgs REQUIRED) +find_package(rosgraph_msgs REQUIRED) +find_package(tf2 REQUIRED) +find_package(tf2_ros REQUIRED) +find_package(tf2_geometry_msgs REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(nav_msgs REQUIRED) +find_package(nmea_msgs REQUIRED) 
+find_package(geographic_msgs REQUIRED) +find_package(sensor_msgs REQUIRED) +find_package(stereo_msgs REQUIRED) +find_package(image_transport REQUIRED) +find_package(std_srvs REQUIRED) +find_package(diagnostic_msgs REQUIRED) +find_package(diagnostic_updater REQUIRED) +find_package(visualization_msgs REQUIRED) +find_package(shape_msgs REQUIRED) +find_package(robot_localization REQUIRED) + +# Debug +find_package(backward_ros REQUIRED) + +# Check ISAAC ROS Nitros availability +find_package(isaac_ros_common QUIET) +if(isaac_ros_common_FOUND) + find_package(isaac_ros_nitros) + if(isaac_ros_nitros_FOUND) + find_package(isaac_ros_managed_nitros QUIET) + if(isaac_ros_managed_nitros_FOUND) + find_package(isaac_ros_nitros_image_type QUIET) + if(isaac_ros_nitros_image_type_FOUND) + list(APPEND DEPENDENCIES isaac_ros_common) + list(APPEND DEPENDENCIES isaac_ros_nitros) + list(APPEND DEPENDENCIES isaac_ros_managed_nitros) + list(APPEND DEPENDENCIES isaac_ros_nitros_image_type) + add_definitions(-DFOUND_ISAAC_ROS_NITROS) + message(" * ISAAC ROS Nitros transport is available") + else() + message(WARNING " * 'isaac_ros_nitros_image_type' not found, ISAAC ROS NITROS transport will not be available.") + endif() + else() + message(WARNING " * 'isaac_ros_managed_nitros' not found, ISAAC ROS NITROS transport will not be available.") + endif() + else() + message(WARNING " * 'isaac_ros_nitros' not found, ISAAC ROS NITROS transport will not be available.") + endif() +#else() +# message(" * 'isaac_ros_common' not found, ISAAC ROS NITROS transport will not be available.") +endif() + +# Check Point Cloud Transport availability +find_package(point_cloud_transport QUIET) +if(point_cloud_transport_FOUND) + list(APPEND DEPENDENCIES point_cloud_transport) + message(" * Point Cloud Transport is available") + add_definitions(-DFOUND_POINT_CLOUD_TRANSPORT) +endif() + +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + ament_lint_auto_find_test_dependencies() +endif() + 
+############################################################################### +#Add all files in subdirectories of the project in +# a dummy_target so qtcreator have access to all files +file(GLOB_RECURSE all_files ${CMAKE_CURRENT_SOURCE_DIR}/*) +add_custom_target(all_${PROJECT_NAME}_files SOURCES ${all_files}) + +############################################################################### +# LIBS + + +# create ament index resource which references the libraries in the binary dir +set(node_plugins "") + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) + +set(ZED_LIBS + ${ZED_LIBRARIES} + ${CUDA_LIBRARIES} +) + +############################################################################### +# SOURCES +set(SL_TOOLS_INC + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/sl_tools.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/sl_win_avg.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/sl_logging.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/gnss_replay.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/sl_types.hpp +) + +set(SL_TOOLS_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/src/sl_tools.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/src/sl_win_avg.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/src/gnss_replay.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/src/sl_types.cpp +) + +set(ZED_CAMERA_INC + ${CMAKE_CURRENT_SOURCE_DIR}/src/include/visibility_control.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/include/sl_version.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/include/zed_camera_component.hpp +) + +set(ZED_CAMERA_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/src/zed_camera_component_main.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/src/zed_camera_component_video_depth.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/src/zed_camera_component_objdet.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/src/zed_camera_component_bodytrk.cpp +) + +set(ZED_CAMERA_ONE_INC + 
${CMAKE_CURRENT_SOURCE_DIR}/src/include/visibility_control.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/include/sl_version.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/include/zed_camera_one_component.hpp +) + +set(ZED_CAMERA_ONE_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/src/zed_camera_one_component_main.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/src/zed_camera_one_component_sensors.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/src/zed_camera_one_component_video.cpp +) +############################################################################### +# Bin and Install + +# ZED Camera Component +add_library(zed_camera_component SHARED + ${SL_TOOLS_INC} + ${SL_TOOLS_SRC} + ${ZED_CAMERA_INC} + ${ZED_CAMERA_SRC} +) +target_include_directories(zed_camera_component PUBLIC + ${CUDA_INCLUDE_DIRS} + ${ZED_INCLUDE_DIRS} + ${CMAKE_CURRENT_SOURCE_DIR}/src/include + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/include + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include +) +target_compile_definitions(zed_camera_component + PRIVATE "COMPOSITION_BUILDING_DLL" +) +target_link_libraries(zed_camera_component + ${ZED_LIBS} +) +ament_target_dependencies(zed_camera_component + ${DEPENDENCIES} +) + +rclcpp_components_register_nodes(zed_camera_component "stereolabs::ZedCamera") +set(node_plugins "${node_plugins}stereolabs::ZedCamera;$\n") + +# ZED Camera One Component +add_library(zed_camera_one_component SHARED + ${SL_TOOLS_INC} + ${SL_TOOLS_SRC} + ${ZED_CAMERA_ONE_INC} + ${ZED_CAMERA_ONE_SRC} +) +target_include_directories(zed_camera_one_component PUBLIC + ${CUDA_INCLUDE_DIRS} + ${ZED_INCLUDE_DIRS} + ${CMAKE_CURRENT_SOURCE_DIR}/src/include + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/include + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include +) +target_compile_definitions(zed_camera_one_component + PRIVATE "COMPOSITION_BUILDING_DLL" +) +target_link_libraries(zed_camera_one_component + ${ZED_LIBS} +) +ament_target_dependencies(zed_camera_one_component + 
${DEPENDENCIES} +) + +rclcpp_components_register_nodes(zed_camera_one_component "stereolabs::ZedCameraOne") +set(node_plugins "${node_plugins}stereolabs::ZedCameraOne;$\n") + +# Install components +install(TARGETS zed_camera_component zed_camera_one_component + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION lib/${PROJECT_NAME} +) + +# Install header files +install(DIRECTORY + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera/include/ + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_camera_one/include/ + ${CMAKE_CURRENT_SOURCE_DIR}/src/tools/include/ + ${CMAKE_CURRENT_SOURCE_DIR}/src/include/ + DESTINATION include/${PROJECT_NAME}/ +) + +ament_export_include_directories(include) +ament_export_libraries( + zed_camera_component + zed_camera_one_component +) +ament_export_dependencies(${DEPENDENCIES}) +ament_package() diff --git a/src/lib/zed-ros2-wrapper/zed_components/package.xml b/src/lib/zed-ros2-wrapper/zed_components/package.xml new file mode 100644 index 000000000..1e3407edf --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/package.xml @@ -0,0 +1,59 @@ + + + + zed_components + 5.2.2 + Contains the main ROS 2 components to use a camera of the Stereolabs ZED family + STEREOLABS + Apache License 2.0 + https://www.stereolabs.com/ + https://github.com/stereolabs/zed-ros2-wrapper + https://github.com/stereolabs/zed-ros2-wrapper/issues + + ament_cmake + + ament_cmake_auto + + backward_ros + nlohmann-json-dev + zed_msgs + rclcpp + rclcpp_components + rcutils + builtin_interfaces + std_msgs + rosgraph_msgs + stereo_msgs + sensor_msgs + geometry_msgs + nav_msgs + nmea_msgs + geographic_msgs + tf2 + tf2_ros + tf2_geometry_msgs + image_transport + std_srvs + diagnostic_msgs + diagnostic_updater + visualization_msgs + robot_localization + + launch_ros + image_transport_plugins + compressed_image_transport + compressed_depth_image_transport + theora_image_transport + + ament_lint_auto + ament_cmake_copyright + ament_cmake_cppcheck + ament_cmake_lint_cmake + 
ament_cmake_pep257 + ament_cmake_uncrustify + ament_cmake_xmllint + + + ament_cmake + + diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/include/sl_version.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/include/sl_version.hpp new file mode 100644 index 000000000..b71487f59 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/include/sl_version.hpp @@ -0,0 +1,33 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef VERSION_HPP_ +#define VERSION_HPP_ + +#include + +namespace stereolabs +{ +const size_t WRAPPER_MAJOR = 5; +const size_t WRAPPER_MINOR = 2; +const size_t WRAPPER_PATCH = 4; + +const size_t SDK_MAJOR_MIN_SUPP = 4; +const size_t SDK_MINOR_MIN_SUPP = 2; + +const size_t SDK_MAJOR_MAX_SUPP = 5; +const size_t SDK_MINOR_MAX_SUPP = 2; +} // namespace stereolabs + +#endif // VERSION_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/include/visibility_control.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/include/visibility_control.hpp new file mode 100644 index 000000000..c5afeeb5d --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/include/visibility_control.hpp @@ -0,0 +1,61 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef VISIBILITY_CONTROL_HPP_ +#define VISIBILITY_CONTROL_HPP_ + +/* *INDENT-OFF* */ +#ifdef __cplusplus +extern "C" { +#endif +/* *INDENT-ON* */ + +// This logic was borrowed (then namespaced) from the examples on the gcc wiki: +// https://gcc.gnu.org/wiki/Visibility + +#if defined _WIN32 || defined __CYGWIN__ +#ifdef __GNUC__ +#define ZED_COMPONENTS_EXPORT __attribute__((dllexport)) +#define ZED_COMPONENTS_IMPORT __attribute__((dllimport)) +#else +#define ZED_COMPONENTS_EXPORT __declspec(dllexport) +#define ZED_COMPONENTS_IMPORT __declspec(dllimport) +#endif +#ifdef ZED_COMPONENTS_BUILDING_DLL +#define ZED_COMPONENTS_PUBLIC ZED_COMPONENTS_EXPORT +#else +#define ZED_COMPONENTS_PUBLIC ZED_COMPONENTS_IMPORT +#endif +#define ZED_COMPONENTS_PUBLIC_TYPE ZED_COMPONENTS_PUBLIC +#define ZED_COMPONENTS_LOCAL +#else +#define ZED_COMPONENTS_EXPORT __attribute__((visibility("default"))) +#define ZED_COMPONENTS_IMPORT +#if __GNUC__ >= 4 +#define ZED_COMPONENTS_PUBLIC __attribute__((visibility("default"))) +#define ZED_COMPONENTS_LOCAL __attribute__((visibility("hidden"))) +#else +#define ZED_COMPONENTS_PUBLIC +#define ZED_COMPONENTS_LOCAL +#endif +#define ZED_COMPONENTS_PUBLIC_TYPE +#endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus +} +#endif +/* *INDENT-ON* */ + +#endif // VISIBILITY_CONTROL_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/gnss_replay.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/gnss_replay.hpp new file mode 100644 index 000000000..47460a430 --- /dev/null +++ 
b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/gnss_replay.hpp @@ -0,0 +1,38 @@ +#ifndef GPSD_Replay_H +#define GPSD_Replay_H + +#include +#include +#include + +namespace sl_tools +{ + +/** + * @brief GNSSReplay is a common interface that read GNSS saved data + */ +class GNSSReplay +{ +public: + explicit GNSSReplay(const std::shared_ptr & zed); + ~GNSSReplay(); + bool initialize(); + void close(); + + sl::FUSION_ERROR_CODE grab(sl::GNSSData & current_data, uint64_t current_timestamp); + +protected: + sl::GNSSData getNextGNSSValue(uint64_t current_timestamp); + +private: + unsigned _current_gnss_idx = 0; + unsigned long long _previous_ts = 0; + unsigned long long _last_cam_ts = 0; + nlohmann::json _gnss_data; + + std::shared_ptr _zed; +}; + +} // namespace sl_tools + +#endif diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_logging.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_logging.hpp new file mode 100644 index 000000000..ad6cb6fa1 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_logging.hpp @@ -0,0 +1,210 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef SL_LOGGING_HPP +#define SL_LOGGING_HPP + +#include + +// ----> DEBUG MACROS +// Common +#define DEBUG_COMM(...) 
\ + if (_debugCommon) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_COMM(stream_arg) \ + if (_debugCommon) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_COMM_ONCE(stream_arg) \ + if (_debugCommon) RCLCPP_DEBUG_STREAM_ONCE(get_logger(), stream_arg) + +// Dynamic parameters +#define DEBUG_DYN_PARAMS(...) \ + if (_debugDynParams) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_DYN_PARAMS(stream_arg) \ + if (_debugDynParams) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_DYN_PARAMS_ONCE(stream_arg) \ + if (_debugDynParams) RCLCPP_DEBUG_STREAM_ONCE(get_logger(), stream_arg) + +// Grab (low level) +#define DEBUG_GRAB(...) \ + if (_debugGrab) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_GRAB(stream_arg) \ + if (_debugGrab) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_GRAB_ONCE(stream_arg) \ + if (_debugGrab) RCLCPP_DEBUG_STREAM_ONCE(get_logger(), stream_arg) + +// Simulation +#define DEBUG_SIM(...) \ + if (_debugSim) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_SIM(...) \ + if (_debugSim) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_SIM(stream_arg) \ + if (_debugSim) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_SIM(duration, stream_arg) \ + if (_debugSim) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Advanced +#define DEBUG_ADV(...) \ + if (_debugAdvanced) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_ADV(...) 
\ + if (_debugAdvanced) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_ADV(stream_arg) \ + if (_debugAdvanced) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_ADV(duration, stream_arg) \ + if (_debugAdvanced) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Video Depth +#define DEBUG_VD(...) \ + if (_debugVideoDepth) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_VD(stream_arg) \ + if (_debugVideoDepth) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) + +// Camera Controls settings +#define DEBUG_CTRL(...) \ + if (_debugCamCtrl) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_CTRL(stream_arg) \ + if (_debugCamCtrl) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) + +// Point Cloud +#define DEBUG_PC(...) \ + if (_debugPointCloud) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_PC(stream_arg) \ + if (_debugPointCloud) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) + +// TF +#define DEBUG_TF(...) \ + if (_debugTf) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_TF(...) \ + if (_debugTf) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_TF(stream_arg) \ + if (_debugTf) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_TF(duration, stream_arg) \ + if (_debugTf) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Positional Tracking +#define DEBUG_PT(...) \ + if (_debugPosTracking) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_PT(...) 
\ + if (_debugPosTracking) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_PT(stream_arg) \ + if (_debugPosTracking) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_PT(duration, stream_arg) \ + if (_debugPosTracking) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// GNSS integration +#define DEBUG_GNSS(...) \ + if (_debugGnss) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_GNSS(stream_arg) \ + if (_debugGnss) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_GNSS(duration, stream_arg) \ + if (_debugGnss) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Sensors +#define DEBUG_SENS(...) \ + if (_debugSensors) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_SENS(stream_arg) \ + if (_debugSensors) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_ONCE_SENS(stream_arg) \ + if (_debugSensors) RCLCPP_DEBUG_STREAM_ONCE(get_logger(), stream_arg) + +// Mapping +#define DEBUG_MAP(...) \ + if (_debugMapping) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_MAP(stream_arg) \ + if (_debugMapping) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_ONCE_MAP(stream_arg) \ + if (_debugMapping) RCLCPP_DEBUG_STREAM_ONCE(get_logger(), stream_arg) + +// Object Detection +#define DEBUG_OD(...) \ + if (_debugObjectDet) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_OD(stream_arg) \ + if (_debugObjectDet) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) + +// Body Tracking +#define DEBUG_BT(...) \ + if (_debugBodyTrk) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_BT(stream_arg) \ + if (_debugBodyTrk) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) + +// Region of Interest +#define DEBUG_ROI(...) 
\ + if (_debugRoi) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_ROI(...) \ + if (_debugRoi) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_ROI(stream_arg) \ + if (_debugRoi) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_ROI(duration, stream_arg) \ + if (_debugRoi) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Streaming +#define DEBUG_STR(...) \ + if (_debugStreaming) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_STR(...) \ + if (_debugStreaming) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_STR(stream_arg) \ + if (_debugStreaming) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_STR(duration, stream_arg) \ + if (_debugStreaming) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } + +// Nitros +#define DEBUG_NITROS(...) \ + if (_debugNitros) RCLCPP_DEBUG(get_logger(), __VA_ARGS__) +#define DEBUG_ONCE_NITROS(...) 
\ + if (_debugNitros) RCLCPP_DEBUG_ONCE(get_logger(), __VA_ARGS__) +#define DEBUG_STREAM_NITROS(stream_arg) \ + if (_debugNitros) RCLCPP_DEBUG_STREAM(get_logger(), stream_arg) +#define DEBUG_STREAM_THROTTLE_NITROS(duration, stream_arg) \ + if (_debugNitros) { \ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); \ + RCLCPP_DEBUG_STREAM_THROTTLE( \ + get_logger(), steady_clock, duration, \ + stream_arg); \ + } +// <---- DEBUG MACROS + +#endif // SL_LOGGING_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_tools.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_tools.hpp new file mode 100644 index 000000000..f5ddc834e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_tools.hpp @@ -0,0 +1,405 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef SL_TOOLS_HPP_ +#define SL_TOOLS_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gnss_replay.hpp" +#include "sl_win_avg.hpp" + +#include + +// Used to enable ZED SDK RealTime SVO pause +//#define USE_SVO_REALTIME_PAUSE + +// Used to enable Positional Tracking lock check +//#define ENABLE_PT_LOCK_CHECK + +// CUDA includes and macros +#ifdef FOUND_ISAAC_ROS_NITROS + #include "isaac_ros_nitros_image_type/nitros_image_builder.hpp" + + #define CUDA_CHECK(status) \ + if (status != cudaSuccess) \ + { \ + RCLCPP_ERROR_STREAM( \ + get_logger(), "Internal CUDA ERROR encountered: {" << std::string( \ + cudaGetErrorName( \ + status)) << "} {" << std::string(cudaGetErrorString(status)) << "}"); \ + std::abort(); \ + } +#endif + +namespace sl_tools +{ + +/*! \brief Get a parameter from the node + * \param node : the node to get the parameter from + * \param paramName : the name of the parameter + * \param defValue : the default value of the parameter + * \param outVal : the output value of the parameter + * \param log_info : the info to log + * \param dynamic : if the parameter is dynamic + * \param min : the minimum value of the parameter + * \param max : the maximum value of the parameter + * \tparam T : the type of the parameter + */ +template +void getParam( + const std::shared_ptr node, + std::string paramName, T defValue, T & outVal, + std::string log_info = std::string(), bool dynamic = false, + T minVal = std::numeric_limits::min(), T maxVal = std::numeric_limits::max()); + +/*! \brief Convert a sl::float3 to a vector of float + * \param r : the sl::float3 to convert + * \return a vector of float + */ +std::vector convertRodrigues(sl::float3 r); + +/*! + * @brief Get the full file path from a relative file name + * @param file_name the relative file name + * @return the full file path + */ +std::string getFullFilePath(const std::string & file_name); + +/*! 
\brief Get Stereolabs SDK version + * \param major : major value for version + * \param minor : minor value for version + * \param sub_minor _ sub_minor value for version + */ +std::string getSDKVersion(int & major, int & minor, int & sub_minor); + +/*! \brief Convert StereoLabs timestamp to ROS timestamp + * \param t : Stereolabs timestamp to be converted + * \param t : ROS 2 clock type + */ +rclcpp::Time slTime2Ros(sl::Timestamp t, rcl_clock_type_t clock_type = RCL_ROS_TIME); + +/*! \brief check if ZED + * \param camModel the model to check + */ +bool isZED(sl::MODEL camModel); + +/*! \brief check if ZED Mini + * \param camModel the model to check + */ +bool isZEDM(sl::MODEL camModel); + +/*! \brief check if ZED2 or ZED2i + * \param camModel the model to check + */ +bool isZED2OrZED2i(sl::MODEL camModel); + +/*! \brief check if ZED-X or ZED-X Mini + * \param camModel the model to check + */ +bool isZEDX(sl::MODEL camModel); + +/*! \brief check if Object Detection is available + * \param camModel the camera model to check + */ +bool isObjDetAvailable(sl::MODEL camModel); + +/*! \brief sl::Mat to ros message conversion + * \param img : the image to publish + * \param frameId : the id of the reference frame of the image + * \param t : rclcpp ros::Time to stamp the image + * \param use_pub_timestamp : if true use the current time as timestamp instead of \ref t + */ +std::unique_ptr imageToROSmsg( + const sl::Mat & img, const std::string & frameId, const rclcpp::Time & t, bool use_pub_timestamp); + +/*! 
\brief sl::Mat to ros message conversion + * \param left : the left image to convert and stitch + * \param right : the right image to convert and stitch + * \param frameId : the id of the reference frame of the image + * \param t : rclcpp rclcpp::Time to stamp the image + * \param use_pub_timestamp : if true use the current time as timestamp instead of \ref t + */ +std::unique_ptr imagesToROSmsg( + const sl::Mat & left, const sl::Mat & right, const std::string & frameId, + const rclcpp::Time & t, bool use_pub_timestamp); + +/*! \brief qos value to string + * \param qos the value to convert + */ +std::string qos2str(rmw_qos_history_policy_t qos); + +/*! \brief qos value to string + * \param qos the value to convert + */ +std::string qos2str(rmw_qos_reliability_policy_t qos); + +/*! \brief qos value to string + * \param qos the value to convert + */ +std::string qos2str(rmw_qos_durability_policy_t qos); + +/*! \brief Creates an sl::Mat containing a ROI from a polygon + * \param poly the ROI polygon. Coordinates must be normalized from 0.0 to 1.0 + * \param out_roi the `sl::Mat` containing the ROI + */ +bool generateROI(const std::vector & poly, sl::Mat & out_roi); + +/*! \brief Parse a vector of vector of floats from a string. + * \param input + * \param error_return + * Syntax is [[1.0, 2.0], [3.3, 4.4, 5.5], ...] */ +std::vector> parseStringMultiVector_float( + const std::string & input, std::string & error_return); + +/*! \brief Parse a vector of integer values from a string. + * \param input + * \param error_return + * Syntax is [1, 2, 3, ...] */ +std::vector parseStringVector_int( + const std::string & input, + std::string & error_return); + +/*! + * @brief Convert thread policy to string + * @param thread_sched_policy + * @return policy string + */ +std::string threadSched2Str(int thread_sched_policy); + +/*! + * @brief check if root is available + * @return true if root + */ +bool checkRoot(); + +/*! 
+ * @brief Convert seconds to string in format DD:HH:MM:SS + * @param sec seconds + * @return string + */ +std::string seconds2str(double sec); + +/*! + * @brief read custom OD labels from a COCO-Like YAML file + * @param label_file label file full path + * @param out_labels the map containing the labels. The map idx corresponds to the class ID + * @return true if successfull + */ +bool ReadCocoYaml( + const std::string & label_file, std::unordered_map & out_labels); + +/** + * @brief Stop Timer used to measure time intervals + * + */ +class StopWatch +{ +public: + explicit StopWatch(rclcpp::Clock::SharedPtr clock); + ~StopWatch() {} + + void tic(); //!< Set the reference time point to the current time + double toc(std::string func_name = std::string() ); //!< Returns the seconds elapsed from the last tic in ROS clock reference (it works also in simulation) + +private: + rclcpp::Time mStartTime; // Reference time point + rclcpp::Clock::SharedPtr mClockPtr; // Node clock interface +}; + +// ----> Utility functions + +/*! \brief Convert a string to uppercase (ASCII) + * \param s : the string to convert + * \return the uppercase string + */ +inline std::string toUpper(std::string s) +{ + std::transform( + s.begin(), s.end(), s.begin(), + [](unsigned char c) {return std::toupper(c);}); + return s; +} + +// ----> Template functions definitions + +// Match a YAML string (with underscores) to an sl:: SDK enum value. +// Case-insensitive. Iterates from `first` up to (excluding) `last`, +// comparing sl::toString() output (spaces replaced by underscores). +// Returns true on match. 
+template +bool matchSdkEnum( + const std::string & str, EnumT first, EnumT last, EnumT & outVal) +{ + const std::string upperStr = toUpper(str); + + for (int idx = static_cast(first); + idx < static_cast(last); ++idx) + { + EnumT candidate = static_cast(idx); + std::string candidate_str = sl::toString(candidate).c_str(); + std::replace(candidate_str.begin(), candidate_str.end(), ' ', '_'); + if (upperStr == toUpper(candidate_str)) { + outVal = candidate; + return true; + } + } + return false; +} + +/*! \brief Read a ROS parameter and match it to an SDK enum value. + * Combines getParam + matchSdkEnum + warning on mismatch + info log. + * \param node : the node to get the parameter from + * \param paramName : the ROS parameter name + * \param defaultStr : the default string value for the parameter + * \param first : first enum value to iterate from + * \param last : sentinel (exclusive) enum value + * \param outVal : receives the matched enum value (unchanged on mismatch) + * \param logLabel : label prefix for the INFO log (empty to skip logging) + * \return true if the parameter matched a valid enum value + */ +template +bool getEnumParam( + const std::shared_ptr node, + const std::string & paramName, + const std::string & defaultStr, + EnumT first, EnumT last, + EnumT & outVal, + const std::string & logLabel = std::string()) +{ + std::string str = defaultStr; + getParam(node, paramName, str, str); + bool ok = matchSdkEnum(str, first, last, outVal); + if (!ok) { + RCLCPP_WARN_STREAM( + node->get_logger(), + "The value of the parameter '" << paramName << "' is not valid: '" + << str << "'. 
Using the default value."); + } + if (!logLabel.empty()) { + RCLCPP_INFO_STREAM( + node->get_logger(), + logLabel << sl::toString(outVal).c_str()); + } + return ok; +} + +template +void getParam( + const std::shared_ptr node, + std::string paramName, T defValue, T & outVal, + std::string log_info, bool dynamic, + T minVal, T maxVal) +{ + rcl_interfaces::msg::ParameterDescriptor descriptor; + descriptor.read_only = !dynamic; + + std::stringstream ss; + if constexpr (std::is_same::value) { + ss << "Default value: " << (defValue ? "TRUE" : "FALSE"); + } else { + ss << "Default value: " << defValue; + } + descriptor.description = ss.str(); + + if constexpr (std::is_same::value) { + descriptor.additional_constraints = "Range: [" + std::to_string(minVal) + ", " + std::to_string( + maxVal) + "]"; + rcl_interfaces::msg::FloatingPointRange range; + range.from_value = minVal; + range.to_value = maxVal; + descriptor.floating_point_range.push_back(range); + } else if constexpr (std::is_same::value) { + descriptor.additional_constraints = "Range: [" + std::to_string(minVal) + ", " + std::to_string( + maxVal) + "]"; + rcl_interfaces::msg::IntegerRange range; + range.from_value = minVal; + range.to_value = maxVal; + descriptor.integer_range.push_back(range); + } + + node->declare_parameter(paramName, rclcpp::ParameterValue(defValue), descriptor); + + if (!node->get_parameter(paramName, outVal)) { + RCLCPP_WARN_STREAM( + node->get_logger(), + "The parameter '" + << paramName + << "' is not available or is not valid, using the default value: " + << defValue); + } + + if (!log_info.empty()) { + std::stringstream ss; + ss << log_info; + if constexpr (std::is_same::value) { + ss << (outVal ? "TRUE" : "FALSE"); + } else { + ss << outVal; + } + if (dynamic) { + ss << " [DYNAMIC]"; + } + RCLCPP_INFO_STREAM(node->get_logger(), ss.str()); + } +} +// Validate and assign a dynamic parameter within [minVal, maxVal]. +// Returns true on success. 
On failure, sets result.successful = false with reason. +template +bool checkParamRange( + const rclcpp::Parameter & param, + T & outVal, + T minVal, T maxVal, + rcl_interfaces::msg::SetParametersResult & result, + const rclcpp::Logger & logger) +{ + T val; + if constexpr (std::is_same_v) { + val = param.as_double(); + } else if constexpr (std::is_same_v) { + val = static_cast(param.as_double()); + } else if constexpr (std::is_same_v) { + val = param.as_int(); + } else { + static_assert( + std::is_same_v|| std::is_same_v|| std::is_same_v, + "checkParamRange only supports double, float and int"); + } + + if (val < minVal || val > maxVal) { + result.successful = false; + result.reason = param.get_name() + " must be in range [" + + std::to_string(minVal) + ", " + std::to_string(maxVal) + "]"; + RCLCPP_WARN_STREAM(logger, result.reason); + return false; + } + outVal = val; + return true; +} +// <---- Template functions definitions + +} // namespace sl_tools + +#endif // SL_TOOLS_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_types.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_types.hpp new file mode 100644 index 000000000..83eacb533 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_types.hpp @@ -0,0 +1,204 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef SL_TYPES_HPP_ +#define SL_TYPES_HPP_ + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef FOUND_ISAAC_ROS_NITROS + #include "isaac_ros_managed_nitros/managed_nitros_publisher.hpp" + #include "isaac_ros_nitros_image_type/nitros_image.hpp" +#endif + +#ifdef FOUND_POINT_CLOUD_TRANSPORT + #include +#endif + +#define TIMEZERO_ROS rclcpp::Time(0, 0, RCL_ROS_TIME) +#define TIMEZERO_SYS rclcpp::Time(0, 0, RCL_SYSTEM_TIME) + +constexpr auto HEARTBEAT_INTERVAL_MS = 1000; // Publish heartbeat every second +constexpr auto TEMP_PUB_INTERVAL_MS = 1000; // Publish temperature every second + +namespace stereolabs +{ + +// ----> Global constants +const double DEG2RAD = 0.017453293; +const double RAD2DEG = 57.295777937; + +const sl::COORDINATE_SYSTEM ROS_COORDINATE_SYSTEM = + sl::COORDINATE_SYSTEM::RIGHT_HANDED_Z_UP_X_FWD; +const sl::UNIT ROS_MEAS_UNITS = sl::UNIT::METER; + +const int QOS_QUEUE_SIZE = 10; +// <---- Global constants + +#ifdef _SL_JETSON_ +const bool IS_JETSON = true; +#else +const bool IS_JETSON = false; +#endif + +const float NOT_VALID_TEMP = -273.15f; + +// ----> Typedefs to simplify declarations + +#ifdef FOUND_ISAAC_ROS_NITROS +typedef std::shared_ptr> nitrosImgPub; +#endif + +typedef std::shared_ptr camInfoMsgPtr; + +typedef rclcpp::Publisher::SharedPtr camInfoPub; +typedef rclcpp::Publisher::SharedPtr clockPub; +typedef rclcpp::Publisher::SharedPtr svoStatusPub; +typedef rclcpp::Publisher::SharedPtr healthStatusPub; +typedef rclcpp::Publisher::SharedPtr heartbeatStatusPub; + +typedef 
rclcpp::Publisher::SharedPtr imagePub; +typedef rclcpp::Publisher::SharedPtr disparityPub; + +typedef rclcpp::Publisher::SharedPtr pointcloudPub; + +typedef rclcpp::Publisher::SharedPtr imuPub; +typedef rclcpp::Publisher::SharedPtr magPub; +typedef rclcpp::Publisher::SharedPtr pressPub; +typedef rclcpp::Publisher::SharedPtr tempPub; + +typedef rclcpp::Publisher::SharedPtr posePub; +typedef rclcpp::Publisher::SharedPtr poseStatusPub; +typedef rclcpp::Publisher::SharedPtr gnssFusionStatusPub; +typedef rclcpp::Publisher::SharedPtr poseCovPub; +typedef rclcpp::Publisher::SharedPtr transfPub; +typedef rclcpp::Publisher::SharedPtr odomPub; +typedef rclcpp::Publisher::SharedPtr pathPub; + +typedef rclcpp::Publisher::SharedPtr objPub; +typedef rclcpp::Publisher::SharedPtr depthInfoPub; + +typedef rclcpp::Publisher::SharedPtr planePub; +typedef rclcpp::Publisher::SharedPtr markerPub; + +typedef rclcpp::Publisher::SharedPtr geoPosePub; +typedef rclcpp::Publisher::SharedPtr gnssFixPub; + +typedef rclcpp::Publisher::SharedPtr healthPub; + +typedef rclcpp::Subscription::SharedPtr clickedPtSub; +typedef rclcpp::Subscription::SharedPtr gnssFixSub; +typedef rclcpp::Subscription::SharedPtr clockSub; + +typedef rclcpp::Service::SharedPtr enableDepthPtr; + +typedef rclcpp::Service::SharedPtr resetOdomSrvPtr; +typedef rclcpp::Service::SharedPtr resetPosTrkSrvPtr; +typedef rclcpp::Service::SharedPtr setPoseSrvPtr; +typedef rclcpp::Service::SharedPtr saveAreaMemorySrvPtr; +typedef rclcpp::Service::SharedPtr enableObjDetPtr; +typedef rclcpp::Service::SharedPtr enableBodyTrkPtr; +typedef rclcpp::Service::SharedPtr enableMappingPtr; + +typedef rclcpp::Service::SharedPtr startSvoRecSrvPtr; +typedef rclcpp::Service::SharedPtr setRoiSrvPtr; +typedef rclcpp::Service::SharedPtr stopSvoRecSrvPtr; +typedef rclcpp::Service::SharedPtr pauseSvoSrvPtr; +typedef rclcpp::Service::SharedPtr setSvoFramePtr; +typedef rclcpp::Service::SharedPtr resetRoiSrvPtr; +typedef rclcpp::Service::SharedPtr 
toLLSrvPtr; +typedef rclcpp::Service::SharedPtr fromLLSrvPtr; +typedef rclcpp::Service::SharedPtr enableStreamingPtr; + + +/*! + * @brief Video/Depth topic resolution + */ +typedef enum +{ + NATIVE, //!< Same camera grab resolution + CUSTOM //!< Custom Rescale Factor +} PubRes; + +std::string toString(const PubRes & res); + +typedef enum +{ + PUB, //!< Same resolution as Color and Depth Map. [Old behavior for compatibility] + FULL, //!< Full resolution. Not recommended because slow processing and high bandwidth requirements + COMPACT, //!< Standard resolution. Optimizes processing and bandwidth + REDUCED //!< Half resolution. Low processing and bandwidth requirements +} PcRes; + +std::string toString(const PcRes & res); + +const int NEURAL_W = 896; +const int NEURAL_H = 512; +// <---- Typedefs to simplify declarations + +} // namespace stereolabs + +#endif // SL_TYPES_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_win_avg.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_win_avg.hpp new file mode 100644 index 000000000..8a9bf6672 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/include/sl_win_avg.hpp @@ -0,0 +1,51 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef SL_WIN_AVG_HPP_ +#define SL_WIN_AVG_HPP_ + +#include // size_t +#include // std::dequeue +#include + +namespace sl_tools +{ + +class WinAvg +{ +public: + explicit WinAvg(size_t win_size = 15); + ~WinAvg(); + + double setNewSize(size_t win_size); + double addValue(double val); + + /// @brief Get the current average of the stored values + /// @return average of the stored values + double getAvg(); + + inline size_t size() {return mVals.size();} + +private: + size_t mWinSize; + + std::deque mVals; // The values in the queue used to calculate the windowed average + double mSumVals = 0.0; // The updated sum of the values in the queue + + std::mutex mQueueMux; +}; + +} + +#endif // SL_WIN_AVG_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/gnss_replay.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/gnss_replay.cpp new file mode 100644 index 000000000..f058d784a --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/gnss_replay.cpp @@ -0,0 +1,328 @@ +#include "gnss_replay.hpp" + +using json = nlohmann::json; + +namespace sl_tools +{ + +inline bool is_microseconds(uint64_t timestamp) +{ + // Check if the timestamp is in microseconds + return 1'000'000'000'000'000 <= timestamp && timestamp < 10'000'000'000'000'000ULL; +} + +inline bool is_nanoseconds(uint64_t timestamp) +{ + // Check if the timestamp is in microseconds + return 1'000'000'000'000'000'000 <= timestamp && timestamp < 10'000'000'000'000'000'000ULL; +} + +GNSSReplay::GNSSReplay(const std::shared_ptr & zed) {_zed = zed;} + +GNSSReplay::~GNSSReplay() {close();} + +bool GNSSReplay::initialize() +{ + auto svo_custom_data_keys = _zed->getSVODataKeys(); + std::string gnss_key = "GNSS_json"; + bool found = false; + for (auto & it : svo_custom_data_keys) { + if (it.find(gnss_key) != std::string::npos) { + found = true; + break; + } + } + + if (!found) { + return false; + } + + std::map data; + auto status = _zed->retrieveSVOData(gnss_key, data); // Get 
ALL + + /* + We handle 2 formats: + * + * { + "coordinates": { + "latitude": XXX, + "longitude": XXX, + "altitude": XXX + }, + "ts": 1694263390000000, + "latitude_std": 0.51, + "longitude_std": 0.51, + "altitude_std": 0.73, + "position_covariance": [ + 0.2601, + 0, + 0, + 0, + 0.2601, + 0, + 0, + 0, + 0.5328999999999999 + ] + }, + ========= + * Or + * this one will be converted to the format above + { + "Eph": 0.467, + "EpochTimeStamp": 1694266998000000, + "Epv": 0.776, + "Geopoint": { + "Altitude": XXX, + "Latitude": XXX, + "Longitude": XXX + }, + "Position": [ + [ + XXX, + XXX, + XXX + ] + ], + "Velocity": [ + [ + -0.63, + 0.25, + 0.53 + ] + ] + } + */ + + auto tmp_array = json::array(); + for (auto & it : data) { + try { + auto _gnss_data_point = + json::parse(it.second.content.begin(), it.second.content.end()); + auto _gnss_data_point_formatted = json::object(); + + if (!_gnss_data_point["Geopoint"].is_null()) { + _gnss_data_point_formatted["coordinates"] = { + {"latitude", _gnss_data_point["Geopoint"]["Latitude"]}, + {"longitude", _gnss_data_point["Geopoint"]["Longitude"]}, + {"altitude", _gnss_data_point["Geopoint"]["Altitude"]}, + }; + _gnss_data_point_formatted["ts"] = _gnss_data_point["EpochTimeStamp"]; + + float latitude_std = _gnss_data_point["Eph"]; + float longitude_std = _gnss_data_point["Eph"]; + float altitude_std = _gnss_data_point["Epv"]; + + _gnss_data_point_formatted["latitude_std"] = latitude_std; + _gnss_data_point_formatted["longitude_std"] = longitude_std; + _gnss_data_point_formatted["altitude_std"] = altitude_std; + + _gnss_data_point_formatted["position_covariance"] = + json::array( + {longitude_std + longitude_std, 0, 0, 0, + latitude_std + latitude_std, 0, 0, 0, + altitude_std + altitude_std}); + + _gnss_data_point_formatted["original__gnss_data"] = _gnss_data_point; + + } else if (!_gnss_data_point["coordinates"].is_null() && + !_gnss_data_point["latitude_std"].is_null() && + !_gnss_data_point["longitude_std"].is_null()) + { + // no 
conversion + _gnss_data_point_formatted = _gnss_data_point; + } + + tmp_array.push_back(_gnss_data_point_formatted); + + } catch (const std::runtime_error & e) { + std::cerr << "Error while reading GNSS data: " << e.what() << std::endl; + } + } + _gnss_data["GNSS"] = tmp_array; + + _current_gnss_idx = 0; + _previous_ts = 0; + + return true; +} + +void GNSSReplay::close() +{ + _gnss_data.clear(); + _current_gnss_idx = 0; +} + + +inline std::string gps_mode2str(int status) +{ + std::string out; + switch (status) { + case 1: + out = "STATUS_GPS"; + break; + case 2: + out = "STATUS_DGPS"; + break; + case 3: + out = "STATUS_RTK_FIX"; + break; + case 4: + out = "STATUS_RTK_FLT"; + break; + case 5: + out = "STATUS_DR"; + break; + case 6: + out = "STATUS_GNSSDR"; + break; + case 7: + out = "STATUS_TIME"; + break; + case 8: + out = "STATUS_SIM"; + break; + case 9: + out = "STATUS_PPS_FIX"; + break; + default: + case 0: + out = "STATUS_UNK"; + break; + } + return out; +} + +sl::GNSSData getGNSSData(json & _gnss_data, int gnss_idx) +{ + sl::GNSSData current__gnss_data; + current__gnss_data.ts = 0; + + // If we are at the end of GNSS data, exit + if (gnss_idx >= _gnss_data["GNSS"].size()) { + std::cout << "Reached the end of the GNSS playback data." << std::endl; + return current__gnss_data; + } + + json current__gnss_data_json = _gnss_data["GNSS"][gnss_idx]; + // Check inputs: + if ( + current__gnss_data_json["coordinates"].is_null() || + current__gnss_data_json["coordinates"]["latitude"].is_null() || + current__gnss_data_json["coordinates"]["longitude"].is_null() || + current__gnss_data_json["coordinates"]["altitude"].is_null() || + current__gnss_data_json["ts"].is_null()) + { + std::cout << "Null GNSS playback data." 
<< std::endl; + return current__gnss_data; + } + + + // if (!current__gnss_data_json["original__gnss_data"].is_null()) { + // if (!current__gnss_data_json["original__gnss_data"]["fix"].is_null()) { + // if (!current__gnss_data_json["original__gnss_data"]["fix"]["status"].is_null()) + // std::cout << "GNSS info: " << gps_mode2str(current__gnss_data_json["original__gnss_data"]["fix"]["status"]) << " " << current__gnss_data_json["longitude_std"] << " " << current__gnss_data_json["altitude_std"] << std::endl; + // } + // } + + auto gnss_timestamp = current__gnss_data_json["ts"].get(); + // Fill out timestamp: + if (is_microseconds(gnss_timestamp)) { + current__gnss_data.ts.setMicroseconds(gnss_timestamp); + } else if (is_nanoseconds(gnss_timestamp)) { + current__gnss_data.ts.setNanoseconds(gnss_timestamp); + } else { + std::cerr << "Warning: Invalid timestamp format from GNSS file" << std::endl; + } + + // Fill out coordinates: + current__gnss_data.setCoordinates( + current__gnss_data_json["coordinates"]["latitude"].get(), + current__gnss_data_json["coordinates"]["longitude"].get(), + current__gnss_data_json["coordinates"]["altitude"].get(), + false); + + // Fill out default standard deviation: + current__gnss_data.longitude_std = current__gnss_data_json["longitude_std"]; + current__gnss_data.latitude_std = current__gnss_data_json["latitude_std"]; + current__gnss_data.altitude_std = current__gnss_data_json["altitude_std"]; + // Fill out covariance [must be not null] + std::array position_covariance; + for (unsigned i = 0; i < 9; i++) { + position_covariance[i] = 0.0; // initialize empty covariance + + } + // set covariance diagonal + position_covariance[0] = current__gnss_data.longitude_std * current__gnss_data.longitude_std; + position_covariance[1 * 3 + 1] = current__gnss_data.latitude_std * + current__gnss_data.latitude_std; + position_covariance[2 * 3 + 2] = current__gnss_data.altitude_std * + current__gnss_data.altitude_std; + 
current__gnss_data.position_covariance = position_covariance; + + return current__gnss_data; +} + +sl::GNSSData GNSSReplay::getNextGNSSValue(uint64_t current_timestamp) +{ + sl::GNSSData current__gnss_data = getGNSSData(_gnss_data, _current_gnss_idx); + + if (current__gnss_data.ts.data_ns == 0) { + return current__gnss_data; + } + + if (current__gnss_data.ts.data_ns > current_timestamp) { + current__gnss_data.ts.data_ns = 0; + return current__gnss_data; + } + + sl::GNSSData last_data; + int step = 1; + while (1) { + last_data = current__gnss_data; + int64_t diff_last = + static_cast(current_timestamp) - + static_cast(current__gnss_data.ts.data_ns); + current__gnss_data = getGNSSData(_gnss_data, _current_gnss_idx + step++); + if (current__gnss_data.ts.data_ns == 0) { //error / end of file + break; + } + + if (current__gnss_data.ts.data_ns > current_timestamp) { + int64_t diff_current = + static_cast(current__gnss_data.ts.data_ns) - + static_cast(current_timestamp); + if (diff_current > diff_last) { // keep last + current__gnss_data = last_data; + } + break; + } + _current_gnss_idx++; + } + + return current__gnss_data; +} + +sl::FUSION_ERROR_CODE GNSSReplay::grab(sl::GNSSData & current_data, uint64_t current_timestamp) +{ + current_data.ts.data_ns = 0; + + if (current_timestamp > 0 && (current_timestamp > _last_cam_ts)) { + current_data = getNextGNSSValue(current_timestamp); + } + + if (current_data.ts.data_ns == _previous_ts) { + current_data.ts.data_ns = 0; + } + + _last_cam_ts = current_timestamp; + + if (current_data.ts.data_ns == 0) { // Invalid data + return sl::FUSION_ERROR_CODE::FAILURE; + } + + _previous_ts = current_data.ts.data_ns; + return sl::FUSION_ERROR_CODE::SUCCESS; +} + +} // namespace sl_tools diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_tools.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_tools.cpp new file mode 100644 index 000000000..c28743825 --- /dev/null +++ 
b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_tools.cpp @@ -0,0 +1,731 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include // getuid + +#include +#include +#include + +#include +#include + +#include "sl_tools.hpp" + +namespace sl_tools +{ +std::vector convertRodrigues(sl::float3 r) +{ + float theta = sqrt(r.x * r.x + r.y * r.y + r.z * r.z); + + std::vector R = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}; + + if (theta < FLT_EPSILON) { + return R; + } else { + float c = cos(theta); + float s = sin(theta); + float c1 = 1.f - c; + float itheta = theta ? 
1.f / theta : 0.f; + + r *= itheta; + + std::vector rrt = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}; + + float * p = rrt.data(); + p[0] = r.x * r.x; + p[1] = r.x * r.y; + p[2] = r.x * r.z; + p[3] = r.x * r.y; + p[4] = r.y * r.y; + p[5] = r.y * r.z; + p[6] = r.x * r.z; + p[7] = r.y * r.z; + p[8] = r.z * r.z; + + std::vector r_x = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f}; + p = r_x.data(); + p[0] = 0; + p[1] = -r.z; + p[2] = r.y; + p[3] = r.z; + p[4] = 0; + p[5] = -r.x; + p[6] = -r.y; + p[7] = r.x; + p[8] = 0; + + // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x] + + sl::Matrix3f eye; + eye.setIdentity(); + + sl::Matrix3f sl_R(R.data()); + sl::Matrix3f sl_rrt(rrt.data()); + sl::Matrix3f sl_r_x(r_x.data()); + + sl_R = eye * c + sl_rrt * c1 + sl_r_x * s; + + R[0] = sl_R.r00; + R[1] = sl_R.r01; + R[2] = sl_R.r02; + R[3] = sl_R.r10; + R[4] = sl_R.r11; + R[5] = sl_R.r12; + R[6] = sl_R.r20; + R[7] = sl_R.r21; + R[8] = sl_R.r22; + } + + return R; +} + +std::string getFullFilePath(const std::string & file_name) +{ + std::string new_filename; + if (file_name.front() == '~') { + const char * home_env = std::getenv("HOME"); + std::string home_path = home_env ? 
home_env : "/tmp"; + if (!home_path.empty()) { + new_filename = home_path; + new_filename += file_name.substr(1, file_name.size() - 1); + } + } else { + new_filename = file_name; + } + + std::filesystem::path path(new_filename); + auto abs_path = std::filesystem::absolute(path); + return abs_path.string(); +} + +std::string getSDKVersion(int & major, int & minor, int & sub_minor) +{ + std::string ver = sl::Camera::getSDKVersion().c_str(); + std::vector strings; + std::istringstream f(ver); + std::string s; + + while (getline(f, s, '.')) { + strings.push_back(s); + } + + major = 0; + minor = 0; + sub_minor = 0; + + switch (strings.size()) { + case 3: + sub_minor = std::stoi(strings[2]); + + case 2: + minor = std::stoi(strings[1]); + + case 1: + major = std::stoi(strings[0]); + } + + return ver; +} + +rclcpp::Time slTime2Ros(sl::Timestamp t, rcl_clock_type_t clock_type) +{ + uint64_t ts_nsec = t.getNanoseconds(); + uint32_t sec = static_cast(ts_nsec / 1000000000); + uint32_t nsec = static_cast(ts_nsec % 1000000000); + return rclcpp::Time(sec, nsec, clock_type); +} + +std::unique_ptr imageToROSmsg( + const sl::Mat & img, const std::string & frameId, const rclcpp::Time & t, bool use_pub_timestamp) +{ + std::unique_ptr imgMessage = std::make_unique(); + + imgMessage->header.stamp = use_pub_timestamp ? 
rclcpp::Clock().now() : t; + imgMessage->header.frame_id = frameId; + imgMessage->height = img.getHeight(); + imgMessage->width = img.getWidth(); + + int num = 1; // for endianness detection + imgMessage->is_bigendian = !(*reinterpret_cast(&num) == 1); + + imgMessage->step = img.getStepBytes(); + + size_t size = imgMessage->step * imgMessage->height; + + uint8_t * data_ptr = nullptr; + + sl::MAT_TYPE dataType = img.getDataType(); + + switch (dataType) { + case sl::MAT_TYPE::F32_C1: /**< float 1 channel.*/ + imgMessage->encoding = sensor_msgs::image_encodings::TYPE_32FC1; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::F32_C2: /**< float 2 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::TYPE_32FC2; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::F32_C3: /**< float 3 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::TYPE_32FC3; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::F32_C4: /**< float 4 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::TYPE_32FC4; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::U8_C1: /**< unsigned char 1 channel.*/ + imgMessage->encoding = sensor_msgs::image_encodings::MONO8; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::U8_C2: /**< unsigned char 2 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::TYPE_8UC2; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::U8_C3: /**< unsigned char 3 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::BGR8; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + case sl::MAT_TYPE::U8_C4: /**< unsigned char 4 channels.*/ + imgMessage->encoding = sensor_msgs::image_encodings::BGRA8; + data_ptr = reinterpret_cast(img.getPtr()); + break; + + default: + RCLCPP_ERROR( + rclcpp::get_logger("sl_tools"), + "imageToROSmsg: unsupported MAT_TYPE %d", 
static_cast(dataType)); + return imgMessage; + } + + if (data_ptr == nullptr) { + RCLCPP_ERROR( + rclcpp::get_logger("sl_tools"), + "imageToROSmsg: getPtr returned nullptr (GPU-only or unallocated Mat)"); + return imgMessage; + } + + imgMessage->data = std::vector(data_ptr, data_ptr + size); + + return imgMessage; +} + +std::unique_ptr imagesToROSmsg( + const sl::Mat & left, const sl::Mat & right, const std::string & frameId, + const rclcpp::Time & t, bool use_pub_timestamp) +{ + std::unique_ptr imgMsgPtr = std::make_unique(); + + if ( + left.getWidth() != right.getWidth() || left.getHeight() != right.getHeight() || + left.getChannels() != right.getChannels() || left.getDataType() != right.getDataType()) + { + return imgMsgPtr; + } + + imgMsgPtr->header.stamp = use_pub_timestamp ? rclcpp::Clock().now() : t; + imgMsgPtr->header.frame_id = frameId; + imgMsgPtr->height = left.getHeight(); + imgMsgPtr->width = 2 * left.getWidth(); + + int num = 1; // for endianness detection + imgMsgPtr->is_bigendian = !(*reinterpret_cast(&num) == 1); + + imgMsgPtr->step = 2 * left.getStepBytes(); + + size_t size = imgMsgPtr->step * imgMsgPtr->height; + imgMsgPtr->data.resize(size); + + sl::MAT_TYPE dataType = left.getDataType(); + + char * srcL; + char * srcR; + + switch (dataType) { + case sl::MAT_TYPE::F32_C1: /**< float 1 channel.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::TYPE_32FC1; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::F32_C2: /**< float 2 channels.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::TYPE_32FC2; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::F32_C3: /**< float 3 channels.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::TYPE_32FC3; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::F32_C4: /**< float 4 channels.*/ + 
imgMsgPtr->encoding = sensor_msgs::image_encodings::TYPE_32FC4; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::U8_C1: /**< unsigned char 1 channel.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::MONO8; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::U8_C2: /**< unsigned char 2 channels.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::TYPE_8UC2; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::U8_C3: /**< unsigned char 3 channels.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::BGR8; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + case sl::MAT_TYPE::U8_C4: /**< unsigned char 4 channels.*/ + imgMsgPtr->encoding = sensor_msgs::image_encodings::BGRA8; + srcL = reinterpret_cast(left.getPtr()); + srcR = reinterpret_cast(right.getPtr()); + break; + + default: + RCLCPP_ERROR( + rclcpp::get_logger("sl_tools"), + "imagesToROSmsg: unsupported MAT_TYPE %d", static_cast(dataType)); + return imgMsgPtr; + } + + if (srcL == nullptr || srcR == nullptr) { + RCLCPP_ERROR( + rclcpp::get_logger("sl_tools"), + "imagesToROSmsg: getPtr returned nullptr (GPU-only or unallocated Mat)"); + return imgMsgPtr; + } + + char * dest = reinterpret_cast((&imgMsgPtr->data[0])); + + for (int i = 0; i < left.getHeight(); i++) { + memcpy(dest, srcL, left.getStepBytes()); + dest += left.getStepBytes(); + memcpy(dest, srcR, right.getStepBytes()); + dest += right.getStepBytes(); + + srcL += left.getStepBytes(); + srcR += right.getStepBytes(); + } + + return imgMsgPtr; +} + +std::string qos2str(rmw_qos_history_policy_t qos) +{ + if (qos == RMW_QOS_POLICY_HISTORY_KEEP_LAST) { + return "KEEP_LAST"; + } + + if (qos == RMW_QOS_POLICY_HISTORY_KEEP_ALL) { + return "KEEP_ALL"; + } + + return "Unknown QoS value"; +} + +std::string 
qos2str(rmw_qos_reliability_policy_t qos) +{ + if (qos == RMW_QOS_POLICY_RELIABILITY_RELIABLE) { + return "RELIABLE"; + } + + if (qos == RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT) { + return "BEST_EFFORT"; + } + + return "Unknown QoS value"; +} + +std::string qos2str(rmw_qos_durability_policy_t qos) +{ + if (qos == RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL) { + return "TRANSIENT_LOCAL"; + } + + if (qos == RMW_QOS_POLICY_DURABILITY_VOLATILE) { + return "VOLATILE"; + } + + return "Unknown QoS value"; +} + +inline bool contains(const std::vector & poly, sl::float2 test) +{ + int i, j; + bool c = false; + const int nvert = poly.size(); + for (i = 0, j = nvert - 1; i < nvert; j = i++) { + if ( + ((poly[i].y > test.y) != (poly[j].y > test.y)) && + (test.x < + (poly[j].x - poly[i].x) * (test.y - poly[i].y) / (poly[j].y - poly[i].y) + poly[i].x)) + { + c = !c; + } + } + return c; +} + +bool generateROI(const std::vector & poly, sl::Mat & out_roi) +{ + if (poly.size() < 3) { + out_roi = sl::Mat(); + return false; + } + + // Set each pixel to valid + //std::cerr << "Setting ROI mask to full valid" << std::endl; + out_roi.setTo(255, sl::MEM::CPU); + + // ----> De-normalize coordinates + size_t w = out_roi.getWidth(); + size_t h = out_roi.getHeight(); + + //std::cerr << "De-normalize coordinates" << std::endl; + //std::cerr << "Image resolution: " << w << "x" << h << std::endl; + //std::cerr << "Polygon size: " << poly.size() << std::endl; + std::vector poly_img; + size_t idx = 0; + for (auto & it : poly) { + sl::float2 pt; + pt.x = it.x * w; + pt.y = it.y * h; + + if (pt.x >= w) { + pt.x = (w - 1); + } + if (pt.y >= h) { + pt.y = (h - 1); + } + + poly_img.push_back(pt); + + //std::cerr << "Pushed pt #: " << idx << std::endl; + ++idx; + } + // <---- De-normalize coordinates + + // ----> Unset ROI pixels outside the polygon + //std::cerr << "Unset ROI pixels outside the polygon" << std::endl; + //std::cerr << "Set mask" << std::endl; + for (int v = 0; v < h; v++) { + for (int u = 
0; u < w; u++) { + if (!contains(poly_img, sl::float2(u, v))) { + out_roi.setValue(u, v, 0, sl::MEM::CPU); + } + } + } + //std::cerr << "Mask ready" << std::endl; + //std::cerr << "ROI resolution: " << w << "x" << h << std::endl; + // <---- Unset ROI pixels outside the polygon + + return true; +} + +std::vector parseStringVector_int( + const std::string & input, + std::string & error_return) +{ + std::vector result; + + if (input == "[]") { + error_return = ""; + return result; + } + + if (input.empty() || input.front() != '[' || input.back() != ']') { + error_return = "Vector string must start with [ and end with ]"; + return result; + } + + std::string trimmed = input; + trimmed.erase( + std::remove(trimmed.begin(), trimmed.end(), '['), + trimmed.end()); + trimmed.erase( + std::remove(trimmed.begin(), trimmed.end(), ']'), + trimmed.end()); + + std::stringstream ss(trimmed); + std::string token; + while (std::getline(ss, token, ',')) { + // Trim leading and trailing whitespace + token.erase(0, token.find_first_not_of(" \t\n\r")); + token.erase(token.find_last_not_of(" \t\n\r") + 1); + + if (token.empty()) { + continue; + } + + try { + int value = std::stoi(token); + result.push_back(value); + } catch (const std::exception & e) { + error_return = "Failed to parse integer: " + token; + return result; + } + } + error_return = ""; + return result; +} + +std::vector> parseStringMultiVector_float( + const std::string & input, std::string & error_return) +{ + std::vector> result; + + std::stringstream input_ss(input); + int depth = 0; + std::vector current_vector; + while (!!input_ss && !input_ss.eof()) { + switch (input_ss.peek()) { + case EOF: + break; + case '[': + depth++; + if (depth > 2) { + error_return = "Array depth greater than 2"; + return result; + } + input_ss.get(); + current_vector.clear(); + break; + case ']': + depth--; + if (depth < 0) { + error_return = "More close ] than open ["; + return result; + } + input_ss.get(); + if (depth == 1) { + 
result.push_back(current_vector); + } + break; + case ',': + case ' ': + case '\t': + input_ss.get(); + break; + default: // All other characters should be part of the numbers. + if (depth != 2) { + std::stringstream err_ss; + err_ss << "Numbers at depth other than 2. Char was '" << char(input_ss.peek()) << "'."; + error_return = err_ss.str(); + return result; + } + float value; + input_ss >> value; + if (!!input_ss) { + current_vector.push_back(value); + } + break; + } + } + + if (depth != 0) { + error_return = "Unterminated vector string."; + } else { + error_return = ""; + } + + return result; +} + +std::string threadSched2Str(int thread_sched_policy) +{ + switch (thread_sched_policy) { + case SCHED_OTHER: + return "SCHED_OTHER"; + case SCHED_FIFO: + return "SCHED_FIFO"; + case SCHED_RR: + return "SCHED_RR"; +#ifdef __USE_GNU + case SCHED_BATCH: + return "SCHED_BATCH"; + case SCHED_ISO: + return "SCHED_ISO"; + case SCHED_IDLE: + return "SCHED_IDLE"; + case SCHED_DEADLINE: + return "SCHED_DEADLINE"; +#endif + default: + return ""; + } +} + +bool checkRoot() +{ + if (getuid()) { + return false; + } else { + return true; + } +} + +bool ReadCocoYaml( + const std::string & label_file, std::unordered_map & out_labels) +{ + // Open the YAML file + std::ifstream file(label_file.c_str()); + if (!file.is_open()) { + return false; + } + + // Read the file line by line + std::string line; + std::vector lines; + while (std::getline(file, line)) { + lines.push_back(line); + } + + // Find the start and end of the names section + std::size_t start = 0; + std::size_t end = 0; + for (std::size_t i = 0; i < lines.size(); i++) { + if (lines[i].find("names:") != std::string::npos) { + start = i + 1; + } else if (start > 0 && lines[i].find(':') == std::string::npos) { + end = i; + break; + } + } + + // Extract the labels + for (std::size_t i = start; i < end; i++) { + std::stringstream ss(lines[i]); + std::string class_id, label; + std::getline(ss, class_id, ':'); // Extract the 
number before the delimiter + // ---> remove heading spaces and tabs + class_id.erase(remove(class_id.begin(), class_id.end(), ' '), class_id.end()); + class_id.erase(remove(class_id.begin(), class_id.end(), '\t'), class_id.end()); + // <--- remove heading spaces and tabs + std::getline(ss, label); // Extract the string after the delimiter + out_labels[class_id] = label; + } + + return true; +} + + +bool isZED(sl::MODEL camModel) +{ + if (camModel == sl::MODEL::ZED) { + return true; + } + return false; +} + +bool isZEDM(sl::MODEL camModel) +{ + if (camModel == sl::MODEL::ZED_M) { + return true; + } + return false; +} + +bool isZED2OrZED2i(sl::MODEL camModel) +{ + if (camModel == sl::MODEL::ZED2) { + return true; + } + if (camModel == sl::MODEL::ZED2i) { + return true; + } + return false; +} + +bool isZEDX(sl::MODEL camModel) +{ + if (camModel == sl::MODEL::ZED_X) { + return true; + } + if (camModel == sl::MODEL::ZED_XM) { + return true; + } + if (camModel == sl::MODEL::VIRTUAL_ZED_X) { + return true; + } + return false; +} + +bool isObjDetAvailable(sl::MODEL camModel) +{ + if (camModel != sl::MODEL::ZED) { + return true; + } + return false; +} + +std::string seconds2str(double sec) +{ + int days = sec / 86400; + sec -= days * 86400; + int hours = sec / 3600; + sec -= hours * 3600; + int minutes = sec / 60; + sec -= minutes * 60; + + std::stringstream ss; + ss << days << " days, " << hours << " hours, " << minutes << " min, " << sec << " sec"; + + return ss.str(); +} + +StopWatch::StopWatch(rclcpp::Clock::SharedPtr clock) +: mStartTime(0, 0, RCL_ROS_TIME), + mClockPtr(clock) +{ + tic(); // Start the timer at creation +} + +void StopWatch::tic() +{ + mStartTime = mClockPtr->now(); // Reset the start time point +} + +double StopWatch::toc(std::string func_name) +{ + auto now = mClockPtr->now(); + + double elapsed_nsec = (now - mStartTime).nanoseconds(); + if (!func_name.empty()) { + std::cerr << func_name << " -> toc elapsed_sec: " << elapsed_nsec / 1e9 << std::endl 
<< + std::flush; + } + + return elapsed_nsec / 1e9; // Returns elapsed time in seconds +} + +} // namespace sl_tools diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_types.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_types.cpp new file mode 100644 index 000000000..c09497c6b --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_types.cpp @@ -0,0 +1,48 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "sl_types.hpp" + +namespace stereolabs +{ + +std::string toString(const PubRes & res) +{ + switch (res) { + case NATIVE: + return "NATIVE"; + case CUSTOM: + return "CUSTOM"; + default: + return ""; + } +} + +std::string toString(const PcRes & res) +{ + switch (res) { + case PUB: + return "PUB"; + case FULL: + return "FULL"; + case COMPACT: + return "COMPACT"; + case REDUCED: + return "REDUCED"; + default: + return ""; + } +} + +} // namespace stereolabs diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_win_avg.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_win_avg.cpp new file mode 100644 index 000000000..bd231f07d --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/tools/src/sl_win_avg.cpp @@ -0,0 +1,71 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "sl_win_avg.hpp" + +namespace sl_tools +{ + +WinAvg::WinAvg(size_t win_size) +{ + mWinSize = win_size; + mSumVals = 0.0; +} + +WinAvg::~WinAvg() {} + +double WinAvg::setNewSize(size_t win_size) +{ + std::lock_guard guard(mQueueMux); + + mWinSize = win_size; + while (mVals.size() > mWinSize) { + double val = mVals.back(); + mVals.pop_back(); + mSumVals -= val; + } + + return mSumVals / mVals.size(); +} + +double WinAvg::addValue(double val) +{ + std::lock_guard guard(mQueueMux); + if (mVals.size() == mWinSize) { + double older = mVals.back(); + mVals.pop_back(); + mSumVals -= older; + } + + mVals.push_front(val); + mSumVals += val; + + auto avg = mSumVals / mVals.size(); + + // std::cout << "New val: " << val << " - Size: " << mVals.size() + // << " - Sum: " << mSumVals << " - Avg: " << avg << std::endl; + + return avg; +} + +double WinAvg::getAvg() +{ + std::lock_guard guard(mQueueMux); + + double avg = mSumVals / mVals.size(); + + return avg; +} + +} diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/cost_traversability.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/cost_traversability.hpp new file mode 100644 index 000000000..bd2fbacf9 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/cost_traversability.hpp @@ -0,0 +1,71 @@ +#ifndef COST_TRAVERSABILITY_HPP +#define COST_TRAVERSABILITY_HPP__ + +#include "sl/Fusion.hpp" + +namespace stereolabs +{ +namespace cost_traversability +{ +/** +\brief Defines robot parameters, this will be used to compute the 
TRAVERSABILITY_COST +*/ +struct RobotParameters +{ + float radius = 0.25f; + float step_max = 0.1f; + float slope_max = 20.0f; // degrees + float roughness_max = 0.1f; +}; + +/** + \brief Defines the traversability parameters, this will be used to compute the TRAVERSABILITY_COST + */ +struct TraversabilityParameters +{ + float occupancy_threshold = 0.5f; + float slope_weight = 1.f / 3.f; + float step_weight = 1.f / 3.f; + float roughness_weight = 1.f / 3.f; +}; + +constexpr sl::LayerName TRAVERSABILITY_COST = + static_cast(static_cast(sl::LayerName::LAST) + 1); +constexpr sl::LayerName OCCUPANCY = + static_cast(static_cast(sl::LayerName::LAST) + 2); +constexpr sl::LayerName TRAVERSABILITY_COST_STEP = + static_cast(static_cast(sl::LayerName::LAST) + 3); +constexpr sl::LayerName TRAVERSABILITY_COST_SLOPE = + static_cast(static_cast(sl::LayerName::LAST) + 4); +constexpr sl::LayerName TRAVERSABILITY_COST_ROUGHNESS = + static_cast(static_cast(sl::LayerName::LAST) + 5); + +constexpr float OCCUPIED_CELL = 1.f; +constexpr float FREE_CELL = 0.f; +constexpr float INVALID_CELL_DATA = NAN; +constexpr float UNKNOWN_CELL = NAN; + +void initCostTraversibily( + sl::Terrain & cost_terrain, float resolution, float range, + float height_threshold); + +void computeCost( + sl::Terrain & elevation_terrain, sl::Terrain & cost_terrain, + const float grid_resolution, RobotParameters robot_parameters, + TraversabilityParameters traversability_parameters); + +void normalization(sl::Terrain & cost_terrain, sl::LayerName layer, sl::Mat & view); + +} // namespace cost_traversability + +} // namespace stereolabs + +// SDK internal function +namespace plane +{ +void compute_pca( + std::vector & points, sl::float3 & normal_vect, sl::float3 & centroid, + sl::float3 & eigen_values); +} + +#endif // COST_TRAVERSABILITY_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/zed_camera_component.hpp 
b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/zed_camera_component.hpp new file mode 100644 index 000000000..c16649742 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/include/zed_camera_component.hpp @@ -0,0 +1,1191 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ZED_CAMERA_COMPONENT_HPP_ +#define ZED_CAMERA_COMPONENT_HPP_ + +#include +#include +#include +#include +#include + +#include + +#include "sl_version.hpp" +#include "sl_tools.hpp" +#include "sl_types.hpp" +#include "visibility_control.hpp" + +namespace stereolabs +{ + +class ZedCamera : public rclcpp::Node +{ +public: + ZED_COMPONENTS_PUBLIC + explicit ZedCamera(const rclcpp::NodeOptions & options); + + virtual ~ZedCamera(); + +protected: + // ----> Initialization functions + void initNode(); + void deInitNode(); + + void initParameters(); + void initServices(); + void initThreads(); + + void getDebugParams(); + void getTopicEnableParams(); + void getSimParams(); + void getGeneralParams(); + void getSvoParams(); + void getVideoParams(); + void getRoiParams(); + void getDepthParams(); + void getPosTrackingParams(); + void getGnssFusionParams(); + void getSensorsParams(); + void getMappingParams(); + void getOdParams(); + void getCustomOdParams(); + void getBodyTrkParams(); + void getStreamingServerParams(); + void getAdvancedParams(); + + void setTFCoordFrameNames(); + void initPublishers(); + void 
initVideoDepthPublishers(); + + void initSubscribers(); + + void fillCamInfo( + const std::shared_ptr & zed, + const sensor_msgs::msg::CameraInfo::SharedPtr & leftCamInfoMsg, + const sensor_msgs::msg::CameraInfo::SharedPtr & rightCamInfoMsg, + const std::string & leftFrameId, const std::string & rightFrameId, + bool rawParam = false); + + bool startCamera(); + bool startPosTracking(); + bool saveAreaMemoryFile(const std::string & filePath); + bool start3dMapping(); + void stop3dMapping(); + bool startObjDetect(); + void stopObjDetect(); + bool startBodyTracking(); + void stopBodyTracking(); + bool startSvoRecording(std::string & errMsg); + void stopSvoRecording(); + bool startStreamingServer(); + void stopStreamingServer(); + void closeCamera(); + // <---- Initialization functions + + // ----> Dynamic Parameters Handlers + // Video/Depth + bool handleVideoDepthDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + bool handleGmsl2Params( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + bool handleUsb3Params( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + bool handleCommonVideoParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + bool handleDepthParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + // Object Detection + bool handleOdDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + bool handleCustomOdDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + // Body Tracking + bool handleBodyTrkDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result); + // <---- Dynamic Parameters Handlers + + // ----> Callbacks + void callback_pubFusedPc(); + void callback_pubPaths(); + void callback_pubTemp(); + 
void callback_pubHeartbeat(); + void callback_gnssPubTimerTimeout(); + rcl_interfaces::msg::SetParametersResult callback_dynamicParamChange( + std::vector parameters); + void callback_updateDiagnostic( + diagnostic_updater::DiagnosticStatusWrapper & stat); + + void callback_enableDepth( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_resetOdometry( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_resetPosTracking( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_setPose( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_saveAreaMemory( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_enableObjDet( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_enableBodyTrk( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_enableMapping( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_enableStreaming( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_startSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_stopSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_pauseSvoInput( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_setSvoFrame( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_clickedPoint( + const geometry_msgs::msg::PointStamped::SharedPtr msg); + void callback_gnssFix(const 
sensor_msgs::msg::NavSatFix::SharedPtr msg); + void callback_clock(const rosgraph_msgs::msg::Clock::SharedPtr msg); + void callback_setRoi( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_resetRoi( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_toLL( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_fromLL( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + // <---- Callbacks + + // ----> Thread functions + // Main thread + void threadFunc_zedGrab(); + + // Video/Depth thread + void threadFunc_videoDepthElab(); + void setupVideoDepthThread(); + bool waitForVideoDepthData(std::unique_lock & lock); + void handleVideoDepthPublishing(); + // Point Cloud thread + void threadFunc_pointcloudElab(); + void setupPointCloudThread(); + bool waitForPointCloudData(std::unique_lock & lock); + void handlePointCloudPublishing(); + // Sensors thread + void threadFunc_pubSensorsData(); + // <---- Thread functions + + // ----> Publishing functions + + void publishImageWithInfo( + const sl::Mat & img, + const image_transport::Publisher & pubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t); + +#ifdef FOUND_ISAAC_ROS_NITROS + void publishImageWithInfo( + const sl::Mat & img, + const nitrosImgPub & nitrosPubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t); +#endif + void publishCameraInfo( + const camInfoPub & infoPub, + camInfoMsgPtr & camInfoMsg, const rclcpp::Time & t); + + void publishDepthMapWithInfo(const sl::Mat & depth, const rclcpp::Time & t); + void publishDisparity(const sl::Mat & disparity, const rclcpp::Time & t); + + void 
processVideoDepth(); + bool updateVideoDepthSubscribers(bool force = false); + bool areVideoDepthSubscribed(); + void retrieveVideoDepth(bool gpu); + bool retrieveLeftImage(bool gpu); + bool retrieveLeftRawImage(bool gpu); + bool retrieveRightImage(bool gpu); + bool retrieveRightRawImage(bool gpu); + bool retrieveLeftGrayImage(bool gpu); + bool retrieveLeftRawGrayImage(bool gpu); + bool retrieveRightGrayImage(bool gpu); + bool retrieveRightRawGrayImage(bool gpu); + bool retrieveDepthMap(bool gpu); + bool retrieveConfidence(bool gpu); + bool retrieveDisparity(); + bool retrieveDepthInfo(); + + void publishVideoDepth(rclcpp::Time & out_pub_ts); + void publishLeftAndRgbImages(const rclcpp::Time & t); + void publishLeftRawAndRgbRawImages(const rclcpp::Time & t); + void publishLeftGrayAndRgbGrayImages(const rclcpp::Time & t); + void publishLeftRawGrayAndRgbRawGrayImages(const rclcpp::Time & t); + void publishRightImages(const rclcpp::Time & t); + void publishRightRawImages(const rclcpp::Time & t); + void publishRightGrayImages(const rclcpp::Time & t); + void publishRightRawGrayImages(const rclcpp::Time & t); + void publishStereoImages(const rclcpp::Time & t); + void publishStereoRawImages(const rclcpp::Time & t); + void publishDepthImage(const rclcpp::Time & t); + void publishConfidenceMap(const rclcpp::Time & t); + void publishDisparityImage(const rclcpp::Time & t); + void publishDepthInfo(const rclcpp::Time & t); + void publishCameraInfos(); // Used to publish camera infos when no video/depth is subscribed + + void checkRgbDepthSync(); + bool checkGrabAndUpdateTimestamp(rclcpp::Time & out_pub_ts); + + void processPointCloud(); + bool isPointCloudSubscribed(); + void publishPointCloud(); + void publishImuFrameAndTopic(); + + void publishOdom( + tf2::Transform & odom2baseTransf, sl::Pose & slPose, const tf2::Vector3 & linear_velocity, + const tf2::Vector3 & angular_velocity, + rclcpp::Time t); + void publishPose(); + void publishPoseLandmarks(); + void 
publishGnssPose(); + void publishPoseStatus(); + void publishGnssPoseStatus(); + void publishGeoPoseStatus(); + void publishTFs(rclcpp::Time t); + void publishCameraTFs(rclcpp::Time t); + void publishOdomTF(rclcpp::Time t); + void publishPoseTF(rclcpp::Time t); + bool publishSensorsData(rclcpp::Time force_ts = TIMEZERO_ROS); + void publishHealthStatus(); + bool publishSvoStatus(uint64_t frame_ts); + + void publishClock(const sl::Timestamp & ts); + // <---- Publishing functions + + // ----> Utility functions + bool isDepthRequired(); + bool updatePosTrackingSubscribers(bool force = false); + bool isPosTrackingRequired(); + + void applyVideoSettings(); + void applyAutoExposureGainSettings(); + void applyExposureGainSettings(); + void applyWhiteBalanceSettings(); + void applyBrightnessContrastHueSettings(); + void applySaturationSharpnessGammaSettings(); + void applyZEDXSettings(); + void applyZEDXExposureSettings(); + void applyZEDXAutoExposureTimeRange(); + void applyZEDXExposureCompensation(); + void applyZEDXAnalogDigitalGain(); + void applyZEDXAutoAnalogGainRange(); + void applyZEDXAutoDigitalGainRange(); + void applyZEDXDenoising(); + + void applyDepthSettings(); + + void processOdometry(); + void processPose(); + void processGeoPose(); + void processSvoGnssData(); + + void processDetectedObjects(rclcpp::Time t); + void processBodies(rclcpp::Time t); + + void processRtRoi(rclcpp::Time t); + + bool setPose(float xt, float yt, float zt, float rr, float pr, float yr); + void initTransforms(); + bool getSens2BaseTransform(); + bool getSens2CameraTransform(); + bool getCamera2BaseTransform(); + bool getGnss2BaseTransform(); + + void startFusedPcTimer(double fusedPcRate); + void startPathPubTimer(double pathTimerRate); + void startTempPubTimer(); + void startHeartbeatTimer(); + + // Region of Interest + std::string getParam( + std::string paramName, + std::vector> & outVal); + std::string parseRoiPoly( + const std::vector> & in_poly, + std::vector & out_poly); + // 
<---- Utility functions + +private: + // ZED SDK + std::shared_ptr mZed; + sl::InitParameters mInitParams; + sl::RuntimeParameters mRunParams; + + // ----> Fusion module + std::shared_ptr mFusionConfig; + sl::Fusion mFusion; + sl::InitFusionParameters mFusionInitParams; + sl::CameraIdentifier mCamUuid; + // <---- Fusion module + + uint64_t mFrameCount = 0; + uint32_t mSvoLoopCount = 0; + + // ----> Topics + std::string mTopicRoot = "~/"; + + // Image Topics + std::string mLeftTopic; + std::string mLeftRawTopic; + std::string mRightTopic; + std::string mRightRawTopic; + std::string mRgbTopic; + std::string mRgbRawTopic; + std::string mStereoTopic; + std::string mStereoRawTopic; + std::string mLeftGrayTopic; + std::string mLeftRawGrayTopic; + std::string mRightGrayTopic; + std::string mRightRawGrayTopic; + std::string mRgbGrayTopic; + std::string mRgbRawGrayTopic; + + // Depth Topics + std::string mDisparityTopic; + std::string mDepthTopic; + std::string mDepthInfoTopic; + std::string mConfMapTopic; + std::string mPointcloudTopic; + + // Localization Topics + std::string mOdomTopic; + std::string mPoseTopic; + std::string mPoseStatusTopic; + std::string mPoseCovTopic; + std::string mGnssPoseTopic; + std::string mGnssPoseStatusTopic; + std::string mGeoPoseTopic; + std::string mGeoPoseStatusTopic; + std::string mFusedFixTopic; + std::string mOriginFixTopic; + std::string mPointcloudFusedTopic; + std::string mPointcloud3DLandmarksTopic; + std::string mObjectDetTopic; + std::string mBodyTrkTopic; + std::string mOdomPathTopic; + std::string mPosePathTopic; + std::string mClickedPtTopic; // Clicked point + std::string mRoiMaskTopic; + // <---- Topics + + // ----> Parameter variables + // Debug + bool _debugCommon = false; + bool _debugDynParams = false; + bool _debugGrab = false; + bool _debugSim = false; + bool _debugVideoDepth = false; + bool _debugCamCtrl = false; + bool _debugPointCloud = false; + bool _debugTf = false; + bool _debugPosTracking = false; + bool 
_debugGnss = false; + bool _debugSensors = false; + bool _debugMapping = false; + bool _debugObjectDet = false; + bool _debugBodyTrk = false; + bool _debugAdvanced = false; + bool _debugRoi = false; + bool _debugStreaming = false; + bool _debugNitros = false; + // If available, force disable NITROS usage for debugging and testing + // purposes; otherwise, this is always true. + bool _nitrosDisabled = true; + + // Topic Enablers +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + bool m24bitMode = false; +#endif + bool mPublishSensImu = true; + bool mPublishSensImuRaw = false; + bool mPublishSensMag = false; + bool mPublishSensBaro = false; + bool mPublishSensTemp = false; + bool mPublishSensImuTransf = false; + bool mPublishImgLeftRight = false; + bool mPublishImgRaw = false; + bool mPublishImgGray = false; + bool mPublishImgRgb = true; + bool mPublishImgStereo = false; + bool mPublishImgRoiMask = false; + bool mPublishOdomPose = true; + bool mPublishPoseCov = false; + bool mPublishPath = false; + bool mPublishDetPlane = false; + bool mPublishDepthMap = true; + bool mPublishDepthInfo = false; + bool mPublishPointcloud = true; + bool mPublishConfidence = false; + bool mPublishDisparity = false; + bool mPublishStatus = true; + bool mPublishSvoClock = false; + + // General + int mCamSerialNumber = 0; + int mCamId = -1; + std::vector mCamVirtualSerialNumbers; + std::vector mCamVirtualCameraIds; + bool mSimMode = false; // Expecting simulation data? + bool mUseSimTime = false; // Use sim time? + std::string mSimAddr = + "127.0.0.1"; // The local address of the machine running the simulator + int mSimPort = 30000; // The port to be used to connect to the simulator + + bool mStreamMode = false; // Expecting local streaming data? 
+ std::string mStreamAddr = ""; // The local address of the streaming server + int mStreamPort = 30000; // The port to be used to connect to a local streaming server + + sl::MODEL mCamUserModel = sl::MODEL::ZED2i; // Default camera model + sl::MODEL mCamRealModel; // Camera model requested to SDK + unsigned int mCamFwVersion; // Camera FW version + unsigned int mSensFwVersion; // Sensors FW version + std::string mCameraName = "zed"; // Default camera name + int mCamGrabFrameRate = 15; + double mGrabComputeCappingFps = 0.0; + bool mAsyncImageRetrieval = false; + int mImageValidityCheck = 1; + std::string mSvoFilepath = ""; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + std::string mSvoDecryptionKey = ""; +#endif + bool mSvoLoop = false; + bool mSvoRealtime = false; + int mSvoFrameStart = 0; + double mSvoRate = 1.0; + double mSvoExpectedPeriod = 0.0; + bool mUseSvoTimestamp = false; + bool mUsePubTimestamps = false; + bool mGrabOnce = false; + bool mGrabImuOnce = false; + int mVerbose = 1; + std::string mVerboseLogFile = ""; + int mGpuId = -1; + std::string mOpencvCalibFile; + sl::RESOLUTION mCamResol = sl::RESOLUTION::AUTO; // Default resolution: AUTOMATIC + PubRes mPubResolution = PubRes::NATIVE; // Use native grab resolution by default + double mCustomDownscaleFactor = 1.0; // Used to rescale data with user factor + bool mOpenniDepthMode = + false; // 16 bit UC data in mm else 32F in m, + // for more info -> http://www.ros.org/reps/rep-0118.html + double mCamMinDepth = 0.01; + double mCamMaxDepth = 15.0; + sl::DEPTH_MODE mDepthMode = sl::DEPTH_MODE::NEURAL; + std::string mDepthModelOverride; // Optional model file override for depth mode + PcRes mPcResolution = PcRes::COMPACT; + std::atomic mDepthDisabled = false; // Indicates if depth calculation is not required (DEPTH_MODE::NONE) + int mDepthStabilization = 0; + + int mCamTimeoutSec = 5; + int mMaxReconnectTemp = 5; + bool mCameraSelfCalib = true; + bool mCameraFlip = false; + + + bool 
mSensCameraSync = false; + double mSensPubRate = 200.; + + std::vector> mRoyPolyParam; // Manual ROI polygon + bool mAutoRoiEnabled = false; + bool mManualRoiEnabled = false; + float mRoiDepthFarThresh = 2.5f; + float mRoiImgHeightRationCutOff = 0.5f; + std::unordered_set mRoiModules; + + bool mPosTrackingEnabled = false; + bool mPublishTF = false; + bool mPublishMapTF = false; + bool mPublishImuTF = false; + bool mPoseSmoothing = false; + bool mAreaMemory = true; + std::string mAreaMemoryFilePath = ""; + bool mLocalizationOnly = false; + sl::POSITIONAL_TRACKING_MODE mPosTrkMode = + sl::POSITIONAL_TRACKING_MODE::GEN_1; + bool mSaveAreaMemoryOnClosing = true; + bool mImuFusion = true; + bool mFloorAlignment = false; + bool mTwoDMode = false; + float mFixedZValue = 0.0; + std::vector mInitialBasePose = std::vector(6, 0.0); + bool mResetOdomWhenLoopClosure = true; + bool mResetPoseWithSvoLoop = true; + bool mPublish3DLandmarks = false; + uint8_t mPublishLandmarkSkipFrame = 15; + double mPathPubRate = 2.0; + double mTfOffset = 0.0; + float mPosTrackDepthMinRange = 0.0f; + bool mSetAsStatic = false; + bool mSetGravityAsOrigin = false; + int mPathMaxCount = -1; + + bool mGnssFusionEnabled = false; + std::string mGnssTopic = "/gps/fix"; + bool mGnssEnableReinitialization = true; + bool mGnssEnableRollingCalibration = true; + bool mGnssEnableTranslationUncertaintyTarget = false; + double mGnssVioReinitThreshold = 5.0; + double mGnssTargetTranslationUncertainty = 0.1; + double mGnssTargetYawUncertainty = 0.1; + double mGnssHcovMul = 1.0; + double mGnssVcovMul = 1.0; + bool mGnssZeroAltitude = false; + bool mPublishUtmTf = true; + bool mUtmAsParent = true; + + bool mMappingEnabled = false; + float mMappingRes = 0.05f; + float mMappingRangeMax = 10.0f; + + bool mObjDetEnabled = false; + bool mObjDetTracking = true; + double mObjDetPredTimeout = 0.5; + bool mObjDetReducedPrecision = false; + double mObjDetMaxRange = 15.0; + std::vector mObjDetFilter; + std::map 
mObjDetClassConfMap; + bool mObjDetPeopleEnable = true; + double mObjDetPeopleConf = 50.0; + bool mObjDetVehiclesEnable = true; + double mObjDetVehiclesConf = 50.0; + bool mObjDetBagsEnable = true; + double mObjDetBagsConf = 50.0; + bool mObjDetAnimalsEnable = true; + double mObjDetAnimalsConf = 50.0; + bool mObjDetElectronicsEnable = true; + double mObjDetElectronicsConf = 50.0; + bool mObjDetFruitsEnable = true; + double mObjDetFruitsConf = 50.0; + bool mObjDetSportEnable = true; + double mObjDetSportConf = 50.0; + bool mObjDetRtParamsDirty = true; // Force initial setRuntimeParameters call + sl::OBJECT_DETECTION_MODEL mObjDetModel = + sl::OBJECT_DETECTION_MODEL::MULTI_CLASS_BOX_FAST; + sl::OBJECT_FILTERING_MODE mObjFilterMode = sl::OBJECT_FILTERING_MODE::NMS3D; + std::string mYoloOnnxPath = ""; + int mYoloOnnxSize = 512; + int mCustomClassCount = 1; + std::unordered_map mCustomOdProperties; + std::unordered_map mCustomLabels; + std::unordered_map mCustomClassIdMap; + + bool mBodyTrkEnabled = false; + sl::BODY_TRACKING_MODEL mBodyTrkModel = + sl::BODY_TRACKING_MODEL::HUMAN_BODY_FAST; + sl::BODY_FORMAT mBodyTrkFmt = sl::BODY_FORMAT::BODY_38; + bool mBodyTrkReducedPrecision = false; + double mBodyTrkMaxRange = 15.0f; + sl::BODY_KEYPOINTS_SELECTION mBodyTrkKpSelection = + sl::BODY_KEYPOINTS_SELECTION::FULL; + bool mBodyTrkFitting = true; + bool mBodyTrkEnableTracking = true; + double mBodyTrkPredTimeout = 0.5; + double mBodyTrkConfThresh = 50.0; + int mBodyTrkMinKp = 10; + bool mBodyTrkRtParamsDirty = true; // Force initial setRuntimeParameters call + + double mPdMaxDistanceThreshold = 0.15; + double mPdNormalSimilarityThreshold = 15.0; + + bool mChangeThreadSched = false; + std::string mThreadSchedPolicy; + int mThreadPrioGrab = 50; + int mThreadPrioSens = 70; + int mThreadPrioPointCloud = 60; + + std::atomic mStreamingServerRequired; + sl::STREAMING_CODEC mStreamingServerCodec = sl::STREAMING_CODEC::H264; + int mStreamingServerPort = 30000; + int 
mStreamingServerBitrate = 12500; + int mStreamingServerGopSize = -1; + bool mStreamingServerAdaptiveBitrate = false; + int mStreamingServerChunckSize = 16084; + int mStreamingServerTargetFramerate = 0; + // <---- Parameter variables + + // ----> Dynamic params + OnSetParametersCallbackHandle::SharedPtr mParamChangeCallbackHandle; + + double mVdPubRate = 15.0; + int mCamBrightness = 4; + int mCamContrast = 4; + int mCamHue = 0; + int mCamSaturation = 4; + int mCamSharpness = 4; + int mCamGamma = 8; + bool mCamAutoExpGain = true; + int mCamGain = 80; + int mCamExposure = 80; + bool mCamAutoWB = true; + int mCamWBTemp = 42; + int mDepthConf = 95; + int mDepthTextConf = 100; + double mPcPubRate = 10.0; + double mFusedPcPubRate = 1.0; + bool mRemoveSatAreas = true; + + int mGmslExpTime = 16666; + int mGmslAutoExpTimeRangeMin = 28; + int mGmslAutoExpTimeRangeMax = 30000; + int mGmslExposureComp = 50; + int mGmslAnalogGain = 8000; + int mGmslAnalogGainRangeMin = 1000; + int mGmslAnalogGainRangeMax = 16000; + int mGmslDigitalGain = 128; + int mGmslAutoDigitalGainRangeMin = 1; + int mGmslAutoDigitalGainRangeMax = 256; + int mGmslDenoising = 50; + // <---- Dynamic params + + // ----> QoS + // https://github.com/ros2/ros2/wiki/About-Quality-of-Service-Settings + rclcpp::QoS mQos; + rclcpp::PublisherOptions mPubOpt; + rclcpp::SubscriptionOptions mSubOpt; + // <---- QoS + + // ----> Frame IDs + bool mStaticTfPublished = false; + bool mStaticImuTfPublished = false; + + std::string mBaseFrameId = ""; + std::string mCenterFrameId = ""; + + std::string mRightCamFrameId = ""; + std::string mRightCamOptFrameId = ""; + std::string mLeftCamFrameId = ""; + std::string mLeftCamOptFrameId = ""; + + std::string mImuFrameId = ""; + std::string mBaroFrameId = ""; + std::string mMagFrameId = ""; + std::string mTempLeftFrameId = ""; + std::string mTempRightFrameId = ""; + + std::string mDepthFrameId = ""; + std::string mDepthOptFrameId = ""; + + std::string mPointCloudFrameId = ""; + + 
std::string mUtmFrameId = "utm"; + std::string mMapFrameId = "map"; + std::string mOdomFrameId = "odom"; + std::string mGnssFrameId = ""; + std::string mGnssOriginFrameId = "gnss_ref_pose"; + // <---- Frame IDs + + // ----> Stereolabs Mat Info + int mCamWidth; // Camera frame width + int mCamHeight; // Camera frame height + sl::Resolution mMatResol; + sl::Resolution mPcResol; + // <---- Stereolabs Mat Info + + // Camera IMU transform + sl::Transform mSlCamImuTransf; + + // ----> initialization Transform listener + std::unique_ptr mTfBuffer; + std::unique_ptr mTfListener; + std::unique_ptr mStaticTfBroadcaster; + std::unique_ptr mTfBroadcaster; + // <---- initialization Transform listener + + // ----> TF Transforms + tf2::Transform + mMap2OdomTransf; // Coordinates of the odometry frame in map frame + tf2::Transform mOdom2BaseTransf; // Coordinates of the base in odometry frame + tf2::Transform mMap2BaseTransf; // Coordinates of the base in map frame + tf2::Transform + mSensor2BaseTransf; // Coordinates of the base frame in sensor frame + tf2::Vector3 linear_base; // Linear twist in the camera base link frame + tf2::Vector3 angular_base; // Angular twist in the camera base link frame + tf2::Transform + mSensor2CameraTransf; // Coordinates of the camera frame in sensor frame + tf2::Transform + mCamera2BaseTransf; // Coordinates of the base frame in camera frame + tf2::Transform mMap2UtmTransf; // Coordinates of the UTM frame in map frame + tf2::Transform + mGnss2BaseTransf; // Coordinates of the base in GNSS sensor frame + // <---- TF Transforms + + // ----> TF Transforms Flags + bool mSensor2BaseTransfValid = false; + bool mSensor2BaseTransfFirstErr = true; + bool mSensor2CameraTransfValid = false; + bool mSensor2CameraTransfFirstErr = true; + bool mCamera2BaseTransfValid = false; + bool mCamera2BaseFirstErr = true; + bool mGnss2BaseTransfValid = false; + bool mGnss2BaseTransfFirstErr = true; + bool mMap2UtmTransfValid = false; + + + // <---- TF Transforms Flags + + 
// ----> Messages (ONLY THOSE NOT CHANGING WHILE NODE RUNS) + // Camera infos + camInfoMsgPtr mLeftCamInfoMsg; + camInfoMsgPtr mRightCamInfoMsg; + camInfoMsgPtr mLeftCamInfoRawMsg; + camInfoMsgPtr mRightCamInfoRawMsg; + // <---- Messages + + // ----> Publishers + clockPub mPubClock; + + // Image publishers with camera info + image_transport::Publisher mPubRgb; + image_transport::Publisher mPubRawRgb; + image_transport::Publisher mPubLeft; + image_transport::Publisher mPubRawLeft; + image_transport::Publisher mPubRight; + image_transport::Publisher mPubRawRight; + image_transport::Publisher mPubRgbGray; + image_transport::Publisher mPubRawRgbGray; + image_transport::Publisher mPubLeftGray; + image_transport::Publisher mPubRawLeftGray; + image_transport::Publisher mPubRightGray; + image_transport::Publisher mPubRawRightGray; + image_transport::Publisher mPubRoiMask; + image_transport::Publisher mPubDepth; + image_transport::Publisher mPubConfMap; +#ifdef FOUND_ISAAC_ROS_NITROS + // Nitros image publishers with camera info + nitrosImgPub mNitrosPubRgb; + nitrosImgPub mNitrosPubRawRgb; + nitrosImgPub mNitrosPubLeft; + nitrosImgPub mNitrosPubRawLeft; + nitrosImgPub mNitrosPubRight; + nitrosImgPub mNitrosPubRawRight; + nitrosImgPub mNitrosPubRgbGray; + nitrosImgPub mNitrosPubRawRgbGray; + nitrosImgPub mNitrosPubLeftGray; + nitrosImgPub mNitrosPubRawLeftGray; + nitrosImgPub mNitrosPubRightGray; + nitrosImgPub mNitrosPubRawRightGray; + nitrosImgPub mNitrosPubRoiMask; + nitrosImgPub mNitrosPubDepth; + nitrosImgPub mNitrosPubConfMap; +#endif + + // Image publishers without camera info (no NITROS) + image_transport::Publisher mPubStereo; + image_transport::Publisher mPubRawStereo; + + // Camera Info publishers + camInfoPub mPubRgbCamInfo; + camInfoPub mPubRawRgbCamInfo; + camInfoPub mPubLeftCamInfo; + camInfoPub mPubRawLeftCamInfo; + camInfoPub mPubRightCamInfo; + camInfoPub mPubRawRightCamInfo; + camInfoPub mPubRgbGrayCamInfo; + camInfoPub mPubRawRgbGrayCamInfo; + camInfoPub 
mPubLeftGrayCamInfo; + camInfoPub mPubRawLeftGrayCamInfo; + camInfoPub mPubRightGrayCamInfo; + camInfoPub mPubRawRightGrayCamInfo; + camInfoPub mPubRoiMaskCamInfo; + camInfoPub mPubDepthCamInfo; + camInfoPub mPubConfMapCamInfo; + camInfoPub mPubRgbCamInfoTrans; + camInfoPub mPubRawRgbCamInfoTrans; + camInfoPub mPubLeftCamInfoTrans; + camInfoPub mPubRawLeftCamInfoTrans; + camInfoPub mPubRightCamInfoTrans; + camInfoPub mPubRawRightCamInfoTrans; + camInfoPub mPubRgbGrayCamInfoTrans; + camInfoPub mPubRawRgbGrayCamInfoTrans; + camInfoPub mPubLeftGrayCamInfoTrans; + camInfoPub mPubRawLeftGrayCamInfoTrans; + camInfoPub mPubRightGrayCamInfoTrans; + camInfoPub mPubRawRightGrayCamInfoTrans; + camInfoPub mPubRoiMaskCamInfoTrans; + camInfoPub mPubDepthCamInfoTrans; + camInfoPub mPubConfMapCamInfoTrans; + +#ifdef FOUND_POINT_CLOUD_TRANSPORT + point_cloud_transport::Publisher mPubCloud; + point_cloud_transport::Publisher mPubFusedCloud; + point_cloud_transport::Publisher mPub3DLandmarks; +#else + pointcloudPub mPubCloud; + pointcloudPub mPubFusedCloud; + pointcloudPub mPub3DLandmarks; +#endif + + svoStatusPub mPubSvoStatus; + healthStatusPub mPubHealthStatus; + heartbeatStatusPub mPubHeartbeatStatus; + disparityPub mPubDisparity; + posePub mPubPose; + poseStatusPub mPubPoseStatus; + poseCovPub mPubPoseCov; + odomPub mPubOdom; + odomPub mPubGnssPose; + int mFrameSkipCountLandmarks = 0; + gnssFusionStatusPub mPubGnssPoseStatus; + pathPub mPubOdomPath; + pathPub mPubPosePath; + imuPub mPubImu; + imuPub mPubImuRaw; + tempPub mPubImuTemp; + magPub mPubImuMag; + pressPub mPubPressure; + tempPub mPubTempL; + tempPub mPubTempR; + transfPub mPubCamImuTransf; + objPub mPubObjDet; + objPub mPubBodyTrk; + depthInfoPub mPubDepthInfo; + planePub mPubPlane; + markerPub mPubMarker; + + geoPosePub mPubGeoPose; + gnssFusionStatusPub mPubGeoPoseStatus; + gnssFixPub mPubFusedFix; + gnssFixPub mPubOriginFix; + // <---- Publishers + + // <---- Publisher variables + sl::Timestamp mSdkGrabTS = 0; + 
size_t mRgbSubCount = 0; + size_t mRgbRawSubCount = 0; + size_t mRgbGraySubCount = 0; + size_t mRgbGrayRawSubCount = 0; + size_t mLeftSubCount = 0; + size_t mLeftRawSubCount = 0; + size_t mLeftGraySubCount = 0; + size_t mLeftGrayRawSubCount = 0; + size_t mRightSubCount = 0; + size_t mRightRawSubCount = 0; + size_t mRightGraySubCount = 0; + size_t mRightGrayRawSubCount = 0; + size_t mStereoSubCount = 0; + size_t mStereoRawSubCount = 0; + size_t mDepthSubCount = 0; + size_t mConfMapSubCount = 0; + size_t mDisparitySubCount = 0; + size_t mDepthInfoSubCount = 0; + size_t mPcSubCount = 0; + std::chrono::steady_clock::time_point mLastVideoDepthSubCountQuery; + bool mVideoDepthSubCountInit = false; + size_t mPosTrackingSubCount = 0; + std::chrono::steady_clock::time_point mLastPosTrackingSubCountQuery; + bool mPosTrackingSubCountInit = false; + + sl::Mat mMatLeft, mMatLeftRaw; + sl::Mat mMatRight, mMatRightRaw; + sl::Mat mMatLeftGray, mMatLeftRawGray; + sl::Mat mMatRightGray, mMatRightRawGray; + sl::Mat mMatDepth, mMatDisp, mMatConf; + + float mMinDepth = 0.0f; + float mMaxDepth = 0.0f; + // <---- Publisher variables + + // ----> Point cloud variables + sl::Mat mMatCloud; + sl::FusedPointCloud mFusedPC; + sensor_msgs::msg::PointCloud2 mPcMsg; // Reused across frames to avoid per-frame allocation + // <---- Point cloud variables + + // ----> Subscribers + clickedPtSub mClickedPtSub; + gnssFixSub mGnssFixSub; + clockSub mClockSub; + // <---- Subscribers + + // ----> Threads and Timers + sl::ERROR_CODE mGrabStatus; + sl::ERROR_CODE mConnStatus; + sl::FUSION_ERROR_CODE mFusionStatus = sl::FUSION_ERROR_CODE::MODULE_NOT_ENABLED; + std::thread mGrabThread; // Main grab thread + std::thread mVdThread; // Video and Depth data processing thread + std::thread mPcThread; // Point Cloud publish thread + std::thread mSensThread; // Sensors data publish thread + std::atomic mThreadStop; + std::atomic mNodeDeinitialized; + rclcpp::TimerBase::SharedPtr mInitTimer; + 
rclcpp::TimerBase::SharedPtr mPathTimer; + rclcpp::TimerBase::SharedPtr mFusedPcTimer; + rclcpp::TimerBase::SharedPtr + mTempPubTimer; // Timer to retrieve and publish CMOS temperatures + rclcpp::TimerBase::SharedPtr mGnssPubCheckTimer; + rclcpp::TimerBase::SharedPtr mHeartbeatTimer; + double mSensRateComp = 1.0; + // <---- Threads and Timers + + // ----> Thread Sync + std::mutex mRecMutex; + std::mutex mDynParMutex; + std::mutex mMappingMutex; + std::mutex mObjDetMutex; + std::mutex mBodyTrkMutex; + std::mutex mPcMutex; + std::mutex mCloseCameraMutex; + std::mutex mPtMutex; + std::condition_variable mPcDataReadyCondVar; + std::atomic_bool mPcDataReady; + std::mutex mVdMutex; + std::condition_variable mVdDataReadyCondVar; + std::atomic_bool mVdDataReady; + // <---- Thread Sync + + // ----> Status Flags + bool mDebugMode = false; // Debug mode active? + bool mSvoMode = false; + std::atomic mSvoPause{false}; + int mSvoFrameId = 0; + int mSvoFrameCount = 0; + bool mPosTrackingStarted = false; + std::atomic_bool mPoseLocked = false; + std::atomic mPoseLockCount{0}; + bool mVdPublishing = false; // Indicates if video and depth data are + // subscribed and then published + bool mPcPublishing = + false; // Indicates if point cloud data are subscribed and then published + bool mTriggerAutoExpGain = true; // Triggered on start + bool mTriggerAutoWB = true; // Triggered on start + bool mCamSettingsDirty = true; // Force initial apply on start + bool mRecording = false; + sl::RecordingStatus mRecStatus = sl::RecordingStatus(); + bool mPosTrackingReady = false; + + sl::FusedPositionalTrackingStatus mFusedPosTrackingStatus; + sl::PositionalTrackingStatus mPosTrackingStatus; + + sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE mAutoRoiStatus = + sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE::NOT_ENABLED; + + bool mAreaFileExists = false; + bool mResetOdomFromSrv = false; + bool mSpatialMappingRunning = false; + bool mObjDetRunning = false; + bool mBodyTrkRunning = false; + bool 
mRgbSubscribed = false; + bool mGnssMsgReceived = false; // Indicates if a NavSatFix topic has been + // received, also with invalid position fix + bool mGnssFixValid = false; // Used to keep track of signal loss + bool mGnssFixNew = false; // Used to keep track of signal loss + std::string mGnssServiceStr = ""; + uint16_t mGnssService; + std::atomic mClockAvailable; // Indicates if the "/clock" topic is + // published when `use_sim_time` is true + + std::atomic mStreamingServerRunning; + + bool mUsingCustomOd = false; + uint64_t mHeartbeatCount = 0; + // <---- Status Flags + + // ----> Positional Tracking + sl::Pose mLastZedPose; + sl::Pose mLastZedDeltaOdom; + sl::Transform mInitialPoseSl; + std::vector mOdomPath; + std::vector mPosePath; + sl::GeoPose mLastGeoPose; + sl::ECEF mLastEcefPose; + sl::UTM mLastUtmPose; + sl::LatLng mLastLatLongPose; + double mLastHeading; + tf2::Quaternion mLastHeadingQuat; + sl::ECEF mInitEcefPose; + sl::UTM mInitUtmPose; + sl::LatLng mInitLatLongPose; + double mInitHeading; + bool mGnssInitGood = false; + sl::float3 mGnssAntennaPose; + // <---- Positional Tracking + + // ----> Diagnostic + sl_tools::StopWatch mUptimer; + bool mUsingIPC = false; + float mTempImu = NOT_VALID_TEMP; + float mTempLeft = NOT_VALID_TEMP; + float mTempRight = NOT_VALID_TEMP; + std::unique_ptr mElabPeriodMean_sec; + std::unique_ptr mGrabPeriodMean_sec; + std::unique_ptr mVideoDepthPeriodMean_sec; + std::unique_ptr mVideoDepthElabMean_sec; + std::unique_ptr mPcPeriodMean_sec; + std::unique_ptr mPcProcMean_sec; + std::unique_ptr mImuPeriodMean_sec; + std::unique_ptr mBaroPeriodMean_sec; + std::unique_ptr mMagPeriodMean_sec; + std::unique_ptr mObjDetPeriodMean_sec; + std::unique_ptr mObjDetElabMean_sec; + std::unique_ptr mBodyTrkPeriodMean_sec; + std::unique_ptr mBodyTrkElabMean_sec; + std::unique_ptr mPubFusedCloudPeriodMean_sec; + std::unique_ptr mPubOdomTF_sec; + std::unique_ptr mPubPoseTF_sec; + std::unique_ptr mPubImuTF_sec; + std::unique_ptr 
mGnssFix_sec; + bool mImuPublishing = false; + bool mMagPublishing = false; + bool mBaroPublishing = false; + bool mObjDetSubscribed = false; + bool mBodyTrkSubscribed = false; + + diagnostic_updater::Updater mDiagUpdater; // Diagnostic Updater + + sl_tools::StopWatch mImuTfFreqTimer; + sl_tools::StopWatch mGrabFreqTimer; + sl_tools::StopWatch mImuFreqTimer; + sl_tools::StopWatch mBaroFreqTimer; + sl_tools::StopWatch mMagFreqTimer; + sl_tools::StopWatch mOdomFreqTimer; + sl_tools::StopWatch mPoseFreqTimer; + sl_tools::StopWatch mPcPubFreqTimer; + sl_tools::StopWatch mVdPubFreqTimer; + sl_tools::StopWatch mSensPubFreqTimer; + sl_tools::StopWatch mOdFreqTimer; + sl_tools::StopWatch mBtFreqTimer; + sl_tools::StopWatch mPcFreqTimer; + sl_tools::StopWatch mGnssFixFreqTimer; + + int mSysOverloadCount = 0; + // <---- Diagnostic + + // ----> Timestamps + sl::Timestamp mLastTs_grab = 0; // Used to calculate stable publish frequency + rclcpp::Time mFrameTimestamp; + rclcpp::Time mGnssTimestamp; + rclcpp::Time mLastTs_imu; + rclcpp::Time mLastTs_baro; + rclcpp::Time mLastTs_mag; + rclcpp::Time mLastTs_odom; + rclcpp::Time mLastTs_pose; + rclcpp::Time mLastTs_pc; + rclcpp::Time mPrevTs_pc; + uint64_t mLastTs_gnss_nsec = 0; + rclcpp::Time mLastClock; + // <---- Timestamps + + // ----> SVO Recording parameters + unsigned int mSvoRecBitrate = 0; + sl::SVO_COMPRESSION_MODE mSvoRecCompression = sl::SVO_COMPRESSION_MODE::H265; + unsigned int mSvoRecFramerate = 0; + bool mSvoRecTranscode = false; + std::string mSvoRecFilename; + // <---- SVO Recording parameters + + // ----> Services + enableDepthPtr mEnableDepthSrv; + resetOdomSrvPtr mResetOdomSrv; + resetPosTrkSrvPtr mResetPosTrkSrv; + setPoseSrvPtr mSetPoseSrv; + saveAreaMemorySrvPtr mSaveAreaMemorySrv; + enableObjDetPtr mEnableObjDetSrv; + enableBodyTrkPtr mEnableBodyTrkSrv; + enableMappingPtr mEnableMappingSrv; + startSvoRecSrvPtr mStartSvoRecSrv; + stopSvoRecSrvPtr mStopSvoRecSrv; + pauseSvoSrvPtr mPauseSvoSrv; + setSvoFramePtr 
mSetSvoFrameSrv; + setRoiSrvPtr mSetRoiSrv; + resetRoiSrvPtr mResetRoiSrv; + toLLSrvPtr mToLlSrv; + fromLLSrvPtr mFromLlSrv; + enableStreamingPtr mEnableStreamingSrv; + + sl_tools::StopWatch mSetSvoFrameCheckTimer; + // <---- Services + + // ----> Services names + const std::string mSrvEnableDepthName = "enable_depth"; + const std::string mSrvResetOdomName = "reset_odometry"; + const std::string mSrvResetPoseName = "reset_pos_tracking"; + const std::string mSrvSetPoseName = "set_pose"; + const std::string mSrvSaveAreaMemoryName = "save_area_memory"; + const std::string mSrvEnableObjDetName = "enable_obj_det"; + const std::string mSrvEnableBodyTrkName = "enable_body_trk"; + const std::string mSrvEnableMappingName = "enable_mapping"; + const std::string mSrvEnableStreamingName = "enable_streaming"; + const std::string mSrvStartSvoRecName = "start_svo_rec"; + const std::string mSrvStopSvoRecName = "stop_svo_rec"; + const std::string mSrvToggleSvoPauseName = "toggle_svo_pause"; + const std::string mSrvSetSvoFrameName = "set_svo_frame"; + const std::string mSrvSetRoiName = "set_roi"; + const std::string mSrvResetRoiName = "reset_roi"; + const std::string mSrvToLlName = "toLL"; // Convert from `map` to `Lat Long` + const std::string mSrvFromLlName = "fromLL"; // Convert from `Lat Long` to `map` + // <---- Services names + + // ----> SVO v2 + std::unique_ptr mGnssReplay; + // <---- SVO v2 +}; + +} // namespace stereolabs + +#endif // ZED_CAMERA_COMPONENT_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/cost_traversability.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/cost_traversability.cpp new file mode 100644 index 000000000..3bcaf29cf --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/cost_traversability.cpp @@ -0,0 +1,194 @@ +#include "cost_traversability.hpp" + +namespace stereolabs +{ +namespace cost_traversability +{ +template +T clamp(T const & v, T const & lo, T const & hi) +{ + return (v 
< lo) ? lo : ((v > hi) ? hi : v); +} + +void initCostTraversibily( + sl::Terrain & cost_terrain, float resolution, float range, + float height_threshold) +{ + std::vector layers; + layers.push_back(TRAVERSABILITY_COST); + layers.push_back(OCCUPANCY); + layers.push_back(TRAVERSABILITY_COST_STEP); + layers.push_back(TRAVERSABILITY_COST_SLOPE); + layers.push_back(TRAVERSABILITY_COST_ROUGHNESS); + + auto range_cell = round(range / resolution); + cost_terrain.init(resolution, range_cell, layers); +} + +void computeCost( + sl::Terrain & elevation_terrain, sl::Terrain & cost_terrain, + const float grid_resolution, RobotParameters robot_parameters, + TraversabilityParameters traversability_parameters) +{ + auto square_size_cost = robot_parameters.radius / grid_resolution; + if (square_size_cost < 1) { + square_size_cost = 1; + } + + auto factor_step_ = traversability_parameters.step_weight / robot_parameters.step_max; + auto factor_slope_ = traversability_parameters.slope_weight / robot_parameters.slope_max; + auto factor_roughness_ = traversability_parameters.roughness_weight / + robot_parameters.roughness_max; + + // Update only the recent one, and manage the border ? + const float step_height_crit = robot_parameters.step_max; + + double reso_d = grid_resolution * 1.; + + sl::Timestamp ts_tmp_elevation, ts_tmp_cost; + + double a_rad = robot_parameters.radius * 1.; + int nb_cells = (2. 
* a_rad) / reso_d; // big agent with small grid size is heavier to compute + + const sl::float3 z_vector(0, 0, 1); + + auto chunks_idx = elevation_terrain.getAllValidChunk(); + + // for each chunk + for (auto chunk_id : chunks_idx) { + + auto & chunk_elevation = elevation_terrain.getChunk(chunk_id); + auto & layer_height = chunk_elevation.getLayer(sl::LayerName::ELEVATION); + + auto & chunk_cost = cost_terrain.getChunk(chunk_id); + + chunk_cost.getLayer(TRAVERSABILITY_COST).clear(); + chunk_cost.getLayer(OCCUPANCY).clear(); + chunk_cost.getLayer(TRAVERSABILITY_COST_STEP).clear(); + chunk_cost.getLayer(TRAVERSABILITY_COST_SLOPE).clear(); + chunk_cost.getLayer(TRAVERSABILITY_COST_ROUGHNESS).clear(); + + auto & cost_data = chunk_cost.getLayer(TRAVERSABILITY_COST).getData(); + auto & occupancy_data = chunk_cost.getLayer(OCCUPANCY).getData(); + auto & cost_step_data = chunk_cost.getLayer(TRAVERSABILITY_COST_STEP).getData(); + auto & cost_slope_data = chunk_cost.getLayer(TRAVERSABILITY_COST_SLOPE).getData(); + auto & cost_roughness_data = chunk_cost.getLayer(TRAVERSABILITY_COST_ROUGHNESS).getData(); + + auto dim = chunk_elevation.getDimension(); + const int size_ = dim.getSize() * dim.getSize(); + + auto & elevation_data = layer_height.getData(); + + unsigned int idx_tmp; + float x, y; + for (unsigned int idx_current = 0; idx_current < size_; idx_current++) { + const float ref_height = elevation_data[idx_current]; + if (std::isfinite(ref_height)) { + dim.index2x_y(idx_current, x, y); + // SLOPE + std::vector normals_tmp; + normals_tmp.reserve(nb_cells * nb_cells); + + float max_diff_height = 0; + + double x_area_min = x - a_rad; + double y_area_min = y - a_rad; + + for (int x_ = 0; x_ < nb_cells; x_++) { + float x_v = x_area_min + (x_ * reso_d); + for (int y_ = 0; y_ < nb_cells; y_++) { + float y_v = y_area_min + (y_ * reso_d); + + float curr_height; + if (dim.getIndex(x_v, y_v, idx_tmp) /*True = error*/) { + // Probably chunk edges + curr_height = 
elevation_terrain.readValue(sl::LayerName::ELEVATION, x_v, y_v); + } else { + curr_height = elevation_data[idx_tmp]; + } + + if (std::isfinite(curr_height)) { + normals_tmp.emplace_back(x_v, y_v, curr_height); + max_diff_height = std::max(max_diff_height, fabsf32(curr_height - ref_height)); + } + } + } + + sl::float3 normal, centroid, eigen_values; + plane::compute_pca(normals_tmp, normal, centroid, eigen_values); + + float roughness = 0, slope = 0, step = 0, cost; + + if (max_diff_height > step_height_crit) { + step = max_diff_height; + } + + if (normals_tmp.size() >= 3) { // minimum points + float norm_len = sqrt(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z); + if (norm_len > 0.f) { + float cos_angle = std::clamp( + sl::float3::dot(normal, z_vector) / norm_len, + -1.0f, 1.0f); + slope = acos(cos_angle) * 57.295779513; // the norm of z_vector is 1 + } + + } + if (slope > 90) { + slope = 180.f - slope; + } + + roughness = sqrt(eigen_values.z); // Standard deviation of fitted plane + + cost = clamp( + (roughness * factor_roughness_ + slope * factor_slope_ + step * factor_step_) * 0.3f, + 0.f, 1.f); + cost_data[idx_current] = cost; + occupancy_data[idx_current] = + (cost > traversability_parameters.occupancy_threshold) ? 
OCCUPIED_CELL : FREE_CELL; + if (slope == 0) { + slope = INVALID_CELL_DATA; + } + cost_slope_data[idx_current] = slope; + cost_step_data[idx_current] = step; + cost_roughness_data[idx_current] = roughness; + } else { + occupancy_data[idx_current] = UNKNOWN_CELL; + } + } + } +} + +static const sl::float3 clr_a(244, 242, 246); +static const sl::float3 clr_b(0, 0, 0); + +// generate a linear ColorMap to match ogl interpol +inline sl::uchar3 getColorMap(float value) +{ + auto new_clr = clr_a * value + clr_b * (1.f - value); + return sl::uchar3(new_clr.b, new_clr.g, new_clr.r); +} + +void normalization(sl::Terrain & cost_terrain, sl::LayerName layer, sl::Mat & view) +{ + sl::Mat cost; + auto cost_mat = cost_terrain.retrieveView(cost, sl::MAT_TYPE::F32_C1, layer); + auto cost_res = cost.getResolution(); + view.alloc(cost_res, sl::MAT_TYPE::U8_C3); + + for (int y = 0; y < cost_res.height; y++) { + + auto ptr_cost = cost.getPtr() + y * cost.getStep(); + auto ptr_view = view.getPtr() + y * view.getStep(); + + for (int x = 0; x < cost_res.width; x++) { + float cost = ptr_cost[x]; + if (std::isfinite(cost)) { + ptr_view[x] = getColorMap(cost); + } else { + ptr_view[x] = sl::uchar3(22, 22, 22); + } + } + } +} +} +} diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_bodytrk.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_bodytrk.cpp new file mode 100644 index 000000000..abb9ebb2c --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_bodytrk.cpp @@ -0,0 +1,430 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "zed_camera_component.hpp" +#include "sl_logging.hpp" +#include "sl_tools.hpp" + +namespace stereolabs +{ +void ZedCamera::getBodyTrkParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== Body Track. parameters ==="); + if (sl_tools::isZED(mCamUserModel)) { + RCLCPP_WARN( + get_logger(), + "!!! Body Tracking parameters are not used with ZED!!!"); + return; + } + + sl_tools::getParam( + shared_from_this(), "body_tracking.bt_enabled", + mBodyTrkEnabled, mBodyTrkEnabled, + " * Body Track. enabled: "); + + sl_tools::getEnumParam( + shared_from_this(), "body_tracking.model", "HUMAN_BODY_FAST", + sl::BODY_TRACKING_MODEL::HUMAN_BODY_FAST, + sl::BODY_TRACKING_MODEL::LAST, mBodyTrkModel, + " * Body Track. model: "); + + sl_tools::getEnumParam( + shared_from_this(), "body_tracking.body_format", "BODY_70", + sl::BODY_FORMAT::BODY_18, + sl::BODY_FORMAT::LAST, mBodyTrkFmt, + " * Body Track. format: "); + + sl_tools::getParam( + shared_from_this(), + "body_tracking.allow_reduced_precision_inference", + mBodyTrkReducedPrecision, mBodyTrkReducedPrecision, + " * Body Track. allow reduced precision: "); + + sl_tools::getParam( + shared_from_this(), "body_tracking.max_range", + mBodyTrkMaxRange, mBodyTrkMaxRange, + " * Body Track. 
maximum range [m]: ", false, 0.1, 40.0); + + sl_tools::getEnumParam( + shared_from_this(), "body_tracking.body_kp_selection", "FULL", + sl::BODY_KEYPOINTS_SELECTION::FULL, + sl::BODY_KEYPOINTS_SELECTION::LAST, mBodyTrkKpSelection, + " * Body Track. KP selection: "); + + sl_tools::getParam( + shared_from_this(), "body_tracking.enable_body_fitting", + mBodyTrkFitting, mBodyTrkFitting, " * Body fitting: "); + + sl_tools::getParam( + shared_from_this(), "body_tracking.enable_tracking", + mBodyTrkEnableTracking, mBodyTrkEnableTracking, + " * Body joints tracking: "); + + sl_tools::getParam( + shared_from_this(), "body_tracking.prediction_timeout_s", + mBodyTrkPredTimeout, mBodyTrkPredTimeout, + " * Body Track. prediction timeout [sec]: ", false, 0.0, 300.0); + + sl_tools::getParam( + shared_from_this(), "body_tracking.confidence_threshold", + mBodyTrkConfThresh, mBodyTrkConfThresh, + " * Body Track. confidence thresh.: ", true, 0.0, 100.0); + + sl_tools::getParam( + shared_from_this(), + "body_tracking.minimum_keypoints_threshold", mBodyTrkMinKp, + mBodyTrkMinKp, " * Body Track. min. 
KP thresh.: ", true, 0, 38); +} + +bool ZedCamera::handleBodyTrkDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + DEBUG_BT("handleBodyTrkDynamicParams"); + + if (param.get_name() == "body_tracking.confidence_threshold") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + double val = param.as_double(); + + if ((val < 0.0) || (val >= 100.0)) { + result.successful = false; + result.reason = + param.get_name() + + " must be positive double value in the range [0.0,100.0["; + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mBodyTrkConfThresh = val; + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " << val); + } else if (param.get_name() == + "body_tracking.minimum_keypoints_threshold") + { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_INTEGER; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mBodyTrkMinKp, 0, 38, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " + << mBodyTrkMinKp); + } + + mBodyTrkRtParamsDirty = true; + return true; +} + +bool ZedCamera::startBodyTracking() +{ + DEBUG_BT("startBodyTracking"); + + if (!sl_tools::isObjDetAvailable(mCamRealModel)) { + RCLCPP_ERROR( + get_logger(), + "Body Tracking not started. 
The camera model does not support " + "it with the current version " + "of the SDK"); + return false; + } + + DEBUG_BT("Body Tracking available"); + + if (mDepthDisabled) { + RCLCPP_WARN( + get_logger(), + "Cannot start Body Tracking if Depth processing is disabled"); + return false; + } + + if (!mBodyTrkEnabled) { + DEBUG_BT("Body Tracking not enabled -> NOT STARTING"); + return false; + } + + if (!mCamera2BaseTransfValid || !mSensor2CameraTransfValid || + !mSensor2BaseTransfValid) + { + DEBUG_BT( + "Tracking transforms not yet ready, Body Tracking starting postponed"); + return false; + } + + RCLCPP_INFO(get_logger(), "=== Starting Body Tracking ==="); + + sl::BodyTrackingParameters bt_p; + bt_p.allow_reduced_precision_inference = mBodyTrkReducedPrecision; + bt_p.body_format = mBodyTrkFmt; + bt_p.body_selection = mBodyTrkKpSelection; + bt_p.detection_model = mBodyTrkModel; + bt_p.enable_body_fitting = mBodyTrkFitting; + bt_p.enable_segmentation = false; + bt_p.enable_tracking = mBodyTrkEnableTracking; + bt_p.max_range = mBodyTrkMaxRange; + bt_p.prediction_timeout_s = mBodyTrkPredTimeout; + + sl::ERROR_CODE btError = mZed->enableBodyTracking(bt_p); + + if (btError != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Body Tracking error: " << sl::toString(btError)); + + mBodyTrkRunning = false; + return false; + } + + DEBUG_BT("Body Tracking enabled"); + + if (!mPubBodyTrk) { + mPubBodyTrk = create_publisher( + mBodyTrkTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + "Advertised on topic " << mPubBodyTrk->get_topic_name()); + } + + DEBUG_BT("Body Tracking publisher created"); + + mBodyTrkRunning = true; + return true; +} + +void ZedCamera::stopBodyTracking() +{ + if (mBodyTrkRunning) { + RCLCPP_INFO(get_logger(), "=== Stopping Body Tracking ==="); + mBodyTrkRunning = false; + mBodyTrkEnabled = false; + mZed->disableBodyTracking(); + + // ----> Send an empty message to indicate that no more objects are tracked + // (e.g clean RVIZ2) 
+ auto objMsg = std::make_unique(); + + objMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + objMsg->header.frame_id = mLeftCamFrameId; + + objMsg->objects.clear(); + + DEBUG_STREAM_OD( + "Publishing EMPTY OBJ message " + << mPubBodyTrk->get_topic_name()); + try { + if (mPubBodyTrk) {mPubBodyTrk->publish(std::move(objMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + // <---- Send an empty message to indicate that no more objects are tracked + // (e.g clean RVIZ2) + } +} + +void ZedCamera::processBodies(rclcpp::Time t) +{ + // DEBUG_BT("processBodies"); + + size_t bt_sub_count = 0; + + try { + if (mPubBodyTrk) {bt_sub_count = count_subscribers(mPubBodyTrk->get_topic_name());} + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_OD("processBodies: Exception while counting subscribers"); + return; + } + + if (bt_sub_count < 1) { + mBodyTrkSubscribed = false; + return; + } + + sl_tools::StopWatch btElabTimer(get_clock()); + + mBodyTrkSubscribed = true; + + // ----> Update runtime parameters only when changed + if (mBodyTrkRtParamsDirty) { + sl::BodyTrackingRuntimeParameters bt_params_rt; + bt_params_rt.detection_confidence_threshold = mBodyTrkConfThresh; + bt_params_rt.minimum_keypoints_threshold = mBodyTrkMinKp; + mZed->setBodyTrackingRuntimeParameters(bt_params_rt); + mBodyTrkRtParamsDirty = false; + } + // <---- Update runtime parameters only when changed + + sl::Bodies bodies; + sl::ERROR_CODE btRes = + mZed->retrieveBodies(bodies); + + if (btRes != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Body Tracking error: " << sl::toString(btRes)); + return; + } + + if (!bodies.is_new) { // Async body tracking. 
Update data only if new + // detection is available + DEBUG_BT("No new bodies detected"); + return; + } + + size_t bodyCount = bodies.body_list.size(); + + DEBUG_STREAM_BT("Detected " << bodyCount << " bodies"); + + auto bodyMsg = std::make_unique(); + + bodyMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : t; + bodyMsg->header.frame_id = mLeftCamFrameId; + + bodyMsg->objects.resize(bodyCount); + + size_t idx = 0; + for (const auto & body : bodies.body_list) { + std::string label = "Body_"; + label += std::to_string(body.id); + DEBUG_STREAM_BT("Processing body: " << label); + bodyMsg->objects[idx].label = sl::String(label.c_str()); + bodyMsg->objects[idx].sublabel = ""; + bodyMsg->objects[idx].label_id = body.id; + bodyMsg->objects[idx].confidence = body.confidence; + DEBUG_BT(" - Info OK"); + + memcpy( + &(bodyMsg->objects[idx].position[0]), &(body.position[0]), + 3 * sizeof(float)); + memcpy( + &(bodyMsg->objects[idx].position_covariance[0]), + &(body.position_covariance[0]), 6 * sizeof(float)); + memcpy( + &(bodyMsg->objects[idx].velocity[0]), &(body.velocity[0]), + 3 * sizeof(float)); + DEBUG_BT(" - Pos/Cov/Speed OK"); + + bodyMsg->objects[idx].tracking_available = mBodyTrkEnableTracking; + bodyMsg->objects[idx].tracking_state = + static_cast(body.tracking_state); + bodyMsg->objects[idx].action_state = + static_cast(body.action_state); + DEBUG_BT(" - Status OK"); + + if (body.bounding_box_2d.size() == 4) { + memcpy( + &(bodyMsg->objects[idx].bounding_box_2d.corners[0]), + &(body.bounding_box_2d[0]), 8 * sizeof(unsigned int)); + } + DEBUG_BT(" - BBox 2D OK"); + if (body.bounding_box.size() == 8) { + memcpy( + &(bodyMsg->objects[idx].bounding_box_3d.corners[0]), + &(body.bounding_box[0]), 24 * sizeof(float)); + } + DEBUG_BT(" - BBox 3D OK"); + + memcpy( + &(bodyMsg->objects[idx].dimensions_3d[0]), &(body.dimensions[0]), + 3 * sizeof(float)); + DEBUG_BT(" - Dims OK"); + + bodyMsg->objects[idx].body_format = static_cast(mBodyTrkFmt); + + if 
(body.head_bounding_box_2d.size() == 4) { + memcpy( + &(bodyMsg->objects[idx].head_bounding_box_2d.corners[0]), + &(body.head_bounding_box_2d[0]), 8 * sizeof(unsigned int)); + } + if (body.head_bounding_box.size() == 8) { + memcpy( + &(bodyMsg->objects[idx].head_bounding_box_3d.corners[0]), + &(body.head_bounding_box[0]), 24 * sizeof(float)); + } + + memcpy( + &(bodyMsg->objects[idx].head_position[0]), + &(body.head_position[0]), 3 * sizeof(float)); + + bodyMsg->objects[idx].skeleton_available = true; + + uint8_t kp_size = body.keypoint_2d.size(); + uint8_t kp_size_3d = body.keypoint.size(); + DEBUG_STREAM_BT(" * Skeleton KP: " << static_cast(kp_size)); + if (kp_size <= 70) { + memcpy( + &(bodyMsg->objects[idx].skeleton_2d.keypoints[0]), + &(body.keypoint_2d[0]), 2 * kp_size * sizeof(float)); + + uint8_t kp_copy = std::min(kp_size, kp_size_3d); + memcpy( + &(bodyMsg->objects[idx].skeleton_3d.keypoints[0]), + &(body.keypoint[0]), 3 * kp_copy * sizeof(float)); + } + + // ---------------------------------- + // at the end of the loop + + // TODO(Walter) Add support for + // body.global_root_orientation; + // body.local_orientation_per_joint; + // body.local_orientation_per_joint; + + idx++; + } + + DEBUG_STREAM_OD("Publishing BODY TRK message"); + try { + if (mPubBodyTrk) {mPubBodyTrk->publish(std::move(bodyMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + + // ----> Diagnostic information update + mBodyTrkElabMean_sec->addValue(btElabTimer.toc()); + mBodyTrkPeriodMean_sec->addValue(mBtFreqTimer.toc()); + mBtFreqTimer.tic(); + // <---- Diagnostic information update +} + +} // namespace stereolabs diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_main.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_main.cpp new file mode 100644 index 000000000..05def0729 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_main.cpp @@ -0,0 +1,10032 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "zed_camera_component.hpp" + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "sl_logging.hpp" + +#ifdef FOUND_HUMBLE +#include +#elif defined FOUND_IRON +#include +#elif defined FOUND_JAZZY +#include +#elif defined FOUND_ROLLING +#include +#elif defined FOUND_FOXY +#include +#else +#error Unsupported ROS 2 distro +#endif + +#include + +#include "sl_tools.hpp" + +using namespace std::chrono_literals; +using namespace std::placeholders; + +// Used for simulation data input +#define ZED_SDK_PORT 30000 + +namespace stereolabs +{ + +ZedCamera::ZedCamera(const rclcpp::NodeOptions & options) +: Node("zed_node", options), + mDepthDisabled(false), // 530 + mStreamingServerRequired(false), // 647 + mQos(QOS_QUEUE_SIZE), // 693 + mThreadStop(false), // 954 + mNodeDeinitialized(false), // 955 + mStreamingServerRunning(false), // 1020 + mUptimer(get_clock()), // 1046 + mDiagUpdater(this), // 1075 + mImuTfFreqTimer(get_clock()), // 1077 + mGrabFreqTimer(get_clock()), // 1078 + mImuFreqTimer(get_clock()), // 1079 + mBaroFreqTimer(get_clock()), // 1080 + mMagFreqTimer(get_clock()), // 1081 + mOdomFreqTimer(get_clock()), // 1082 + mPoseFreqTimer(get_clock()), // 1083 + mPcPubFreqTimer(get_clock()), // 1084 + mVdPubFreqTimer(get_clock()), // 1085 + mSensPubFreqTimer(get_clock()), // 1086 + mOdFreqTimer(get_clock()), // 1087 + mBtFreqTimer(get_clock()), // 1088 + mPcFreqTimer(get_clock()), // 1089 + mGnssFixFreqTimer(get_clock()), // 1090 + mFrameTimestamp(TIMEZERO_ROS), // 1097 + mGnssTimestamp(TIMEZERO_ROS), // 1098 + mLastTs_imu(TIMEZERO_ROS), // 1099 + mLastTs_baro(TIMEZERO_ROS), // 1100 + mLastTs_mag(TIMEZERO_ROS), // 1101 + mLastTs_odom(TIMEZERO_ROS), // 1102 + mLastTs_pose(TIMEZERO_ROS), // 1103 + mLastTs_pc(TIMEZERO_ROS), // 1104 + mPrevTs_pc(TIMEZERO_ROS), // 1105 + mLastClock(TIMEZERO_ROS), // 1107 + 
mSetSvoFrameCheckTimer(get_clock()) // 1136 +{ + try { + mUsingIPC = options.use_intra_process_comms(); + + RCLCPP_INFO(get_logger(), "================================"); + RCLCPP_INFO(get_logger(), " ZED Camera Component "); + RCLCPP_INFO(get_logger(), "================================"); + RCLCPP_INFO(get_logger(), " * IPC: %s", mUsingIPC ? "enabled" : "disabled"); + + auto context = options.context(); + + rclcpp::contexts::get_global_default_context()->is_valid(); + + if (!rclcpp::contexts::get_global_default_context() || + !rclcpp::contexts::get_global_default_context()->is_valid()) + { + RCLCPP_ERROR(get_logger(), "Global context is null!"); + exit(EXIT_FAILURE); + } + + if (!context || !context->is_valid()) { + RCLCPP_ERROR(get_logger(), "Node context is null!"); + exit(EXIT_FAILURE); + } + + // Set the name of the main thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_main")).c_str()); + + if (((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < + (SDK_MAJOR_MIN_SUPP * 10 + SDK_MINOR_MIN_SUPP)) || + ((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) > + (SDK_MAJOR_MAX_SUPP * 10 + SDK_MINOR_MAX_SUPP))) + { + RCLCPP_ERROR_STREAM( + get_logger(), + "This version of the ZED ROS2 wrapper is designed to work with ZED SDK " + "v" << static_cast(SDK_MAJOR_MIN_SUPP) + << "." << static_cast(SDK_MINOR_MIN_SUPP) << " or newer up to v" << + static_cast(SDK_MAJOR_MAX_SUPP) << "." << static_cast(SDK_MINOR_MAX_SUPP) << + "."); + RCLCPP_INFO_STREAM( + get_logger(), "* Detected SDK v" + << ZED_SDK_MAJOR_VERSION << "." + << ZED_SDK_MINOR_VERSION << "." + << ZED_SDK_PATCH_VERSION << "-" + << ZED_SDK_BUILD_ID); + RCLCPP_INFO(get_logger(), "Node stopped. 
Press Ctrl+C to exit."); + exit(EXIT_FAILURE); + } + + #if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 50 + sl::setEnvironmentVariable("ZED_SDK_DISABLE_PROGRESS_BAR_LOG", "1"); + #endif + + // ----> Start a "one shot timer" to initialize the node and make `shared_from_this` available + DEBUG_COMM("Creating one-shot initialization timer..."); + mInitTimer = create_wall_timer( + std::chrono::milliseconds(50), + std::bind(&ZedCamera::initNode, this)); + // <---- Start a "one shot timer" to initialize the node and make `shared_from_this` available + + DEBUG_COMM("Waiting for node initialization..."); + } catch (const std::exception & e) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Exception during ZedCamera component initialization: " << e.what()); + } catch (...) { + RCLCPP_ERROR( + get_logger(), + "Unknown exception during ZedCamera component initialization"); + } +} + +void ZedCamera::initNode() +{ + RCLCPP_INFO(get_logger(), " * namespace: %s", get_namespace()); + RCLCPP_INFO(get_logger(), " * node name: %s", get_name()); + RCLCPP_INFO(get_logger(), "================================"); + + RCLCPP_INFO(get_logger(), "Starting node initialization..."); + + // Stop the timer for "one shot" initialization + mInitTimer->cancel(); + + // ----> Publishers/Subscribers options +#ifndef FOUND_FOXY + mPubOpt.qos_overriding_options = + rclcpp::QosOverridingOptions::with_default_policies(); + mSubOpt.qos_overriding_options = + rclcpp::QosOverridingOptions::with_default_policies(); +#endif + // <---- Publishers/Subscribers options + + // Parameters initialization + initParameters(); + + // ----> Diagnostic initialization + std::string info = sl::toString(mCamUserModel).c_str(); + mDiagUpdater.add( + info, this, + &ZedCamera::callback_updateDiagnostic); + std::string hw_id = std::string("Stereolabs camera: ") + mCameraName; + mDiagUpdater.setHardwareID(hw_id); + // <---- Diagnostic initialization + + // Services initialization + initServices(); + + // ----> Start 
camera + if (!startCamera()) { + exit(EXIT_FAILURE); + } + // <---- Start camera + + // Callback when the node is destroyed + // This is used to stop the camera when the node is destroyed + // and to stop the timers + + // Close camera callback before shutdown + using rclcpp::contexts::get_global_default_context; + get_global_default_context()->add_pre_shutdown_callback( + [this]() { + DEBUG_COMM("ZED Component is shutting down"); + deInitNode(); + DEBUG_COMM("ZED Component is shutting down - done"); + }); + + // Dynamic parameters callback + mParamChangeCallbackHandle = add_on_set_parameters_callback( + std::bind(&ZedCamera::callback_dynamicParamChange, this, _1)); +} + +void ZedCamera::deInitNode() +{ + if (mNodeDeinitialized) { + return; + } + mNodeDeinitialized = true; + + DEBUG_COMM("De-initializing ZED Component"); + + // ----> Stop subscribers + DEBUG_COMM("Stopping subscribers"); + mClickedPtSub.reset(); + mGnssFixSub.reset(); + mClockSub.reset(); + DEBUG_COMM("... subscribers stopped"); + // <---- Stop subscribers + + if (mObjDetRunning) { + DEBUG_COMM("Stopping Object Detection"); + std::lock_guard lock(mObjDetMutex); + stopObjDetect(); + DEBUG_COMM("... Object Detection stopped"); + } + + if (mBodyTrkRunning) { + DEBUG_COMM("Stopping Body Tracking"); + std::lock_guard lock(mBodyTrkMutex); + stopBodyTracking(); + DEBUG_COMM("... Body Tracking stopped"); + } + + if (mSpatialMappingRunning) { + DEBUG_COMM("Stopping 3D Mapping"); + std::lock_guard lock(mMappingMutex); + stop3dMapping(); + DEBUG_COMM("... 3D Mapping stopped"); + } + + if (mPathTimer) { + DEBUG_COMM("Stopping path timer"); + mPathTimer->cancel(); + DEBUG_COMM("... path timer stopped"); + } + + if (mFusedPcTimer) { + DEBUG_COMM("Stopping fused cloud timer"); + mFusedPcTimer->cancel(); + DEBUG_COMM("... fused cloud timer stopped"); + } + + if (mTempPubTimer) { + DEBUG_COMM("Stopping temperatures timer"); + mTempPubTimer->cancel(); + DEBUG_COMM("... 
temperatures timer stopped"); + } + + // ----> Verify that all the threads are not active + DEBUG_COMM("Stopping running threads"); + if (!mThreadStop) { + mThreadStop = true; + } + + DEBUG_COMM("Waiting for sensors thread..."); + try { + if (mSensThread.joinable()) { + mSensThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Sensors thread joining exception: " << e.what()); + } + DEBUG_COMM("... sensors thread stopped"); + + DEBUG_COMM("Waiting for video/depth thread..."); + try { + if (mVdThread.joinable()) { + mVdThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Video/Depth thread joining exception: " << e.what()); + } + DEBUG_COMM("... video/depth thread stopped"); + + DEBUG_COMM("Waiting for Point Cloud thread..."); + try { + if (mPcThread.joinable()) { + mPcThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Pointcloud thread joining exception: " << e.what()); + } + DEBUG_COMM("... Point Cloud thread stopped"); + + DEBUG_COMM("Waiting for grab thread..."); + try { + if (mGrabThread.joinable()) { + mGrabThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Grab thread joining exception: " << e.what()); + } + DEBUG_COMM("... grab thread stopped"); + // <---- Verify that all the threads are not active + + DEBUG_COMM("All threads stopped. Closing camera..."); + closeCamera(); + DEBUG_COMM("... 
camera closed"); +} + +ZedCamera::~ZedCamera() +{ + deInitNode(); + if (_debugCommon) { + std::cout << "[ZedCamera] Destructor called" << std::endl << std::flush; + } +} + +void ZedCamera::initServices() +{ + RCLCPP_INFO(get_logger(), "=== SERVICES ==="); + + std::string srv_name; + + std::string srv_prefix = "~/"; + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is disabled + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (!mDepthDisabled) { +#endif + + if (mPosTrackingEnabled) { + // Reset Odometry + srv_name = srv_prefix + mSrvResetOdomName; + mResetOdomSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_resetOdometry, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << mResetOdomSrv->get_service_name() + << "'"); + // Reset Pose + srv_name = srv_prefix + mSrvResetPoseName; + mResetPosTrkSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_resetPosTracking, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mResetPosTrkSrv->get_service_name() << "'"); + // Set Pose + srv_name = srv_prefix + mSrvSetPoseName; + mSetPoseSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_setPose, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << mSetPoseSrv->get_service_name() + << "'"); + // Save Area Memory + srv_name = srv_prefix + mSrvSaveAreaMemoryName; + mSaveAreaMemorySrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_saveAreaMemory, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mSaveAreaMemorySrv->get_service_name() << "'"); + } + } + + if (!mDepthDisabled) { + // Enable Depth Processing + srv_name = srv_prefix + mSrvEnableDepthName; + mEnableDepthSrv = create_service( + srv_name, + 
std::bind(&ZedCamera::callback_enableDepth, this, _1, _2, _3)); + + // Enable Object Detection + srv_name = srv_prefix + mSrvEnableObjDetName; + mEnableObjDetSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_enableObjDet, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mEnableObjDetSrv->get_service_name() << "'"); + + // Enable BodyTracking + srv_name = srv_prefix + mSrvEnableBodyTrkName; + mEnableBodyTrkSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_enableBodyTrk, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mEnableBodyTrkSrv->get_service_name() << "'"); + + if (mPosTrackingEnabled) { + // Enable Mapping + srv_name = srv_prefix + mSrvEnableMappingName; + mEnableMappingSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_enableMapping, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mEnableMappingSrv->get_service_name() << "'"); + } + } + + // Enable Streaming + srv_name = srv_prefix + mSrvEnableStreamingName; + mEnableStreamingSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_enableStreaming, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" + << mEnableStreamingSrv->get_service_name() << "'"); + + // Start SVO Recording + srv_name = srv_prefix + mSrvStartSvoRecName; + mStartSvoRecSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_startSvoRec, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << mStartSvoRecSrv->get_service_name() + << "'"); + // Stop SVO Recording + srv_name = srv_prefix + mSrvStopSvoRecName; + mStopSvoRecSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_stopSvoRec, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << mStopSvoRecSrv->get_service_name() + << "'"); + + // Pause SVO (only if 
the realtime playing mode is disabled) + if (mSvoMode) { +#ifndef USE_SVO_REALTIME_PAUSE + if (!mSvoRealtime) { +#endif + srv_name = srv_prefix + mSrvToggleSvoPauseName; + mPauseSvoSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_pauseSvoInput, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" << mPauseSvoSrv->get_service_name() << "'"); +#ifndef USE_SVO_REALTIME_PAUSE + } +#endif + + //Set Service for SVO frame + srv_name = srv_prefix + mSrvSetSvoFrameName; + mSetSvoFrameSrv = create_service( + srv_name, + std::bind(&ZedCamera::callback_setSvoFrame, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" << mSetSvoFrameSrv->get_service_name() << "'"); + } + + // Set ROI + srv_name = srv_prefix + mSrvSetRoiName; + mSetRoiSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_setRoi, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" << mSetRoiSrv->get_service_name() << "'"); + // Reset ROI + srv_name = srv_prefix + mSrvResetRoiName; + mResetRoiSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_resetRoi, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" << mResetRoiSrv->get_service_name() << "'"); + + if (mPosTrackingEnabled && mGnssFusionEnabled) { + // To Latitude/Longitude + srv_name = srv_prefix + mSrvToLlName; + mToLlSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_toLL, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" << mToLlSrv->get_service_name() << "'"); + // From Latitude/Longitude + srv_name = srv_prefix + mSrvFromLlName; + mFromLlSrv = create_service( + srv_name, std::bind(&ZedCamera::callback_fromLL, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on service: '" << mFromLlSrv->get_service_name() << "'"); + } +} + +std::string ZedCamera::getParam( + std::string paramName, + 
std::vector> & outVal) +{ + outVal.clear(); + + rcl_interfaces::msg::ParameterDescriptor descriptor; + descriptor.read_only = true; + + declare_parameter( + paramName, rclcpp::ParameterValue(std::string("[]")), + descriptor); + + std::string out_str; + + if (!get_parameter(paramName, out_str)) { + RCLCPP_WARN_STREAM( + get_logger(), + "The parameter '" + << paramName + << "' is not available or is not valid, using the default " + "value: []"); + } + + if (out_str.empty()) { + return std::string(); + } + + std::string error; + outVal = sl_tools::parseStringMultiVector_float(out_str, error); + + if (error != "") { + RCLCPP_WARN_STREAM( + get_logger(), "Error parsing " + << paramName + << " parameter: " << error.c_str()); + RCLCPP_WARN_STREAM( + get_logger(), + " " << paramName << " string was " << out_str.c_str()); + + outVal.clear(); + } + + return out_str; +} + + +void ZedCamera::initParameters() +{ + // DEBUG parameters + getDebugParams(); + + // TOPIC parameters + getTopicEnableParams(); + + // SIMULATION parameters + getSimParams(); + + // SVO parameters + getSvoParams(); + + // GENERAL parameters + getGeneralParams(); + + // VIDEO parameters + if (!mSvoMode && !mSimMode) { + getVideoParams(); + } + + // DEPTH parameters + getDepthParams(); + + // POS. 
TRACKING and GNSS FUSION parameters +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (!mDepthDisabled) { +#endif + // Positional Tracking parameters + getPosTrackingParams(); + + if (mPosTrackingEnabled) { + // GNSS Fusion parameters + getGnssFusionParams(); + } else { + mGnssFusionEnabled = false; + } + } else { + mGnssFusionEnabled = false; + mPosTrackingEnabled = false; + mPublishTF = false; + mPublishOdomPose = false; + mPublishPoseCov = false; + mPublishPath = false; + } + + if (!mDepthDisabled) { + // Region of Interest parameters + getRoiParams(); + } else { + mRoyPolyParam.clear(); + mAutoRoiEnabled = false; + } + + // SENSORS parameters + if (!sl_tools::isZED(mCamUserModel)) { + getSensorsParams(); + } + + if (!mDepthDisabled && mPosTrackingEnabled) { + getMappingParams(); + } else { + mMappingEnabled = false; + } + + // AI PARAMETERS +#if ((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 52) + if (!mDepthDisabled && mPosTrackingEnabled) { +#else + if (!mDepthDisabled) { +#endif + if (sl_tools::isObjDetAvailable(mCamUserModel)) { + getOdParams(); + getBodyTrkParams(); + } + } else { + mObjDetEnabled = false; + mBodyTrkEnabled = false; + } + + getStreamingServerParams(); + + getAdvancedParams(); +} + +std::string ZedCamera::parseRoiPoly( + const std::vector> & in_poly, + std::vector & out_poly) +{ + out_poly.clear(); + + std::string ss; + ss = "["; + + size_t poly_size = in_poly.size(); + + if (poly_size < 3) { + if (poly_size != 0) { + RCLCPP_WARN_STREAM( + get_logger(), + "A vector with " + << poly_size + << " points is not enough to create a polygon to set a Region " + "of Interest."); + return std::string(); + } + } else { + for (size_t i = 0; i < poly_size; ++i) { + if (in_poly[i].size() != 2) { + RCLCPP_WARN_STREAM( + get_logger(), "The component 
with index '" + << i + << "' of the ROI vector " + "has not the correct size."); + out_poly.clear(); + return std::string(); + } else if (in_poly[i][0] < 0.0 || in_poly[i][1] < 0.0 || + in_poly[i][0] > 1.0 || in_poly[i][1] > 1.0) + { + RCLCPP_WARN_STREAM( + get_logger(), "The component with index '" + << i + << "' of the ROI vector " + "is not a " + "valid normalized point: [" + << in_poly[i][0] << "," + << in_poly[i][1] << "]."); + out_poly.clear(); + return std::string(); + } else { + sl::float2 pt; + pt.x = in_poly[i][0]; + pt.y = in_poly[i][1]; + out_poly.push_back(pt); + ss += "["; + ss += std::to_string(pt.x); + ss += ","; + ss += std::to_string(pt.y); + ss += "]"; + } + + if (i != poly_size - 1) { + ss += ","; + } + } + } + ss += "]"; + + return ss; +} + +void ZedCamera::getDebugParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== DEBUG parameters ==="); + + sl_tools::getParam( + shared_from_this(), "debug.sdk_verbose", mVerbose, + mVerbose, " * SDK Verbose: ", false, 0, 9999); + sl_tools::getParam( + shared_from_this(), "debug.use_pub_timestamps", + mUsePubTimestamps, mUsePubTimestamps, + " * Use Pub Timestamps: "); + sl_tools::getParam( + shared_from_this(), "debug.sdk_verbose_log_file", + mVerboseLogFile, mVerboseLogFile, " * SDK Verbose File: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_common", _debugCommon, + _debugCommon, " * Debug Common: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_dyn_params", + _debugDynParams, _debugDynParams, + " * Debug Dynamic Parameters: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_grab", _debugGrab, + _debugGrab, " * Debug Grab (low level): "); + sl_tools::getParam( + shared_from_this(), "debug.debug_sim", _debugSim, + _debugSim, " * Debug Simulation: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_video_depth", + _debugVideoDepth, _debugVideoDepth, + " * Debug Video/Depth: "); + sl_tools::getParam( + shared_from_this(), 
"debug.debug_camera_controls", + _debugCamCtrl, _debugCamCtrl, + " * Debug Control settings: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_point_cloud", + _debugPointCloud, _debugPointCloud, + " * Debug Point Cloud: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_tf", _debugTf, _debugTf, + " * Debug TF: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_positional_tracking", + _debugPosTracking, _debugPosTracking, + " * Debug Positional Tracking: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_gnss", _debugGnss, + _debugGnss, " * Debug GNSS: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_sensors", _debugSensors, + _debugSensors, " * Debug sensors: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_mapping", _debugMapping, + _debugMapping, " * Debug Mapping: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_object_detection", + _debugObjectDet, _debugObjectDet, + " * Debug Object Detection: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_body_tracking", + _debugBodyTrk, _debugBodyTrk, " * Debug Body Tracking: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_streaming", + _debugStreaming, _debugStreaming, " * Debug Streaming: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_roi", _debugRoi, + _debugRoi, " * Debug ROI: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_nitros", _debugNitros, + _debugNitros, " * Debug Nitros: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_advanced", _debugAdvanced, + _debugAdvanced, " * Debug Advanced: "); + + mDebugMode = _debugCommon || _debugDynParams || _debugGrab || _debugSim || + _debugVideoDepth || _debugCamCtrl || _debugPointCloud || + _debugTf || _debugPosTracking || _debugGnss || _debugSensors || + _debugMapping || _debugObjectDet || _debugBodyTrk || + _debugAdvanced || _debugRoi || _debugStreaming || _debugNitros; + + if (mDebugMode) { + rcutils_ret_t res = 
rcutils_logging_set_logger_level( + get_logger().get_name(), RCUTILS_LOG_SEVERITY_DEBUG); + + if (res != RCUTILS_RET_OK) { + RCLCPP_INFO(get_logger(), "Error setting DEBUG level for logger"); + } else { + RCLCPP_INFO(get_logger(), " + Debug Mode enabled +"); + } + } else { + rcutils_ret_t res = rcutils_logging_set_logger_level( + get_logger().get_name(), RCUTILS_LOG_SEVERITY_INFO); + + if (res != RCUTILS_RET_OK) { + RCLCPP_INFO(get_logger(), "Error setting INFO level for logger"); + } + } + + DEBUG_STREAM_COMM( + "[ROS 2] Using RMW_IMPLEMENTATION " + << rmw_get_implementation_identifier()); + + const char * nitrosReason = "not_available"; + +#ifdef FOUND_ISAAC_ROS_NITROS + nitrosReason = "enabled"; + + sl_tools::getParam( + shared_from_this(), "debug.disable_nitros", + _nitrosDisabled, _nitrosDisabled); + + bool nitrosDisabledByParam = _nitrosDisabled; + + if (nitrosDisabledByParam) { + nitrosReason = "param_debug.disable_nitros"; + } + + if (!_nitrosDisabled && mUsingIPC) { + RCLCPP_WARN( + get_logger(), + "NITROS transport is incompatible with ROS 2 Intra-Process Communication " + "(IPC). NITROS will be disabled. To use NITROS, launch with " + "enable_ipc:=false. NITROS provides its own zero-copy transport, " + "so disabling IPC does not reduce performance."); + _nitrosDisabled = true; + nitrosReason = "auto_disabled_ipc_incompatibility"; + } + + if (nitrosDisabledByParam) { + RCLCPP_WARN( + get_logger(), + "NITROS is available, but is disabled by 'debug.disable_nitros'"); + } +#else + _nitrosDisabled = true; // Force disable NITROS if not available +#endif + + RCLCPP_DEBUG( + get_logger(), + "Transport summary: IPC=%s, NITROS=%s, reason=%s", + mUsingIPC ? "enabled" : "disabled", + _nitrosDisabled ? 
"disabled" : "enabled", + nitrosReason); +} + +void ZedCamera::getTopicEnableParams() +{ + RCLCPP_INFO(get_logger(), "=== TOPIC selection parameters ==="); + + // General topics + sl_tools::getParam( + shared_from_this(), "general.publish_status", mPublishStatus, + mPublishStatus, " * Publish Status: "); + + // Image topics +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + sl_tools::getParam( + shared_from_this(), "video.enable_24bit_output", + m24bitMode, m24bitMode); + if (m24bitMode) { + RCLCPP_INFO(get_logger(), " * Image format: BGR 24-bit"); + } else { + RCLCPP_INFO(get_logger(), " * Image format: BGRA 32-bit"); + } +#endif + sl_tools::getParam( + shared_from_this(), "video.publish_left_right", mPublishImgLeftRight, + mPublishImgLeftRight, " * Publish Left/Right images: "); + sl_tools::getParam( + shared_from_this(), "video.publish_raw", mPublishImgRaw, + mPublishImgRaw, " * Publish Raw images: "); + sl_tools::getParam( + shared_from_this(), "video.publish_gray", mPublishImgGray, + mPublishImgGray, " * Publish Gray images: "); + sl_tools::getParam( + shared_from_this(), "video.publish_rgb", mPublishImgRgb, + mPublishImgRgb, " * Publish RGB image: "); + sl_tools::getParam( + shared_from_this(), "video.publish_stereo", mPublishImgStereo, + mPublishImgStereo, " * Publish Stereo image: "); + + // Region of Interest topics + sl_tools::getParam( + shared_from_this(), "region_of_interest.publish_roi_mask", mPublishImgRoiMask, + mPublishImgRoiMask, " * Publish ROI Mask image: "); + + // Depth topics + sl_tools::getParam( + shared_from_this(), "depth.publish_depth_map", mPublishDepthMap, + mPublishDepthMap, " * Publish Depth Map: "); + sl_tools::getParam( + shared_from_this(), "depth.publish_depth_info", mPublishDepthInfo, + mPublishDepthInfo, " * Publish Depth Info: "); + sl_tools::getParam( + shared_from_this(), "depth.publish_point_cloud", mPublishPointcloud, + mPublishPointcloud, " * Publish Point Cloud: "); + sl_tools::getParam( + 
shared_from_this(), "depth.publish_depth_confidence", mPublishConfidence, + mPublishConfidence, " * Publish Depth Confidence: "); + sl_tools::getParam( + shared_from_this(), "depth.publish_disparity", mPublishDisparity, + mPublishDisparity, " * Publish Disparity: "); + + // Sensor topics + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu", mPublishSensImu, + mPublishSensImu, " * Publish IMU: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu_raw", mPublishSensImuRaw, + mPublishSensImuRaw, " * Publish IMU Raw: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_cam_imu_transf", mPublishSensImuTransf, + mPublishSensImuTransf, " * Publish LeftCam/IMU Transf.: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_mag", mPublishSensMag, + mPublishSensMag, " * Publish Magnetometer: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_baro", mPublishSensBaro, + mPublishSensBaro, " * Publish Barometer: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_temp", mPublishSensTemp, + mPublishSensTemp, " * Publish Temperature: "); + + // Localization topics + sl_tools::getParam( + shared_from_this(), "pos_tracking.publish_odom_pose", mPublishOdomPose, + mPublishOdomPose, " * Publish Odometry/Pose: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.publish_pose_cov", mPublishPoseCov, + mPublishPoseCov, " * Publish Pose with Covariance: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.publish_cam_path", mPublishPath, + mPublishPath, " * Publish Camera Path: "); + + // Mapping topics + sl_tools::getParam( + shared_from_this(), "mapping.publish_det_plane", mPublishDetPlane, + mPublishDetPlane, " * Publish Detection Plane: "); +} + +void ZedCamera::getSimParams() +{ + // SIMULATION active? 
+ sl_tools::getParam( + shared_from_this(), "simulation.sim_enabled", mSimMode, + mSimMode); + + if (!get_parameter("use_sim_time", mUseSimTime)) { + RCLCPP_WARN( + get_logger(), + "The parameter 'use_sim_time' is not available or is not " + "valid, using the default value."); + } + + if (mSimMode) { + RCLCPP_INFO(get_logger(), " === SIMULATION MODE ACTIVE ==="); + sl_tools::getParam( + shared_from_this(), "simulation.sim_address", mSimAddr, + mSimAddr, " * Sim. server address: "); + sl_tools::getParam( + shared_from_this(), "simulation.sim_port", mSimPort, + mSimPort, " * Sim. server port: "); + + RCLCPP_INFO_STREAM( + get_logger(), + " * Use Sim Time: " << (mUseSimTime ? "TRUE" : "FALSE")); + } else if (mUseSimTime) { + RCLCPP_WARN( + get_logger(), + "The parameter 'use_sim_time' is set to 'true', but simulation " + "mode is not enabled. UNPREDICTABLE BEHAVIORS EXPECTED."); + } +} + +void ZedCamera::getGeneralParams() +{ + rclcpp::Parameter paramVal; + + mStreamMode = false; + if (!mSvoMode) { + RCLCPP_INFO(get_logger(), "=== LOCAL STREAMING parameters ==="); + sl_tools::getParam( + shared_from_this(), "stream.stream_address", + std::string(), mStreamAddr); + if (mStreamAddr != "") { + mStreamMode = true; + sl_tools::getParam( + shared_from_this(), "stream.stream_port", mStreamPort, + mStreamPort); + RCLCPP_INFO_STREAM( + get_logger(), " * Local stream input: " << mStreamAddr << ":" << mStreamPort); + } else { + RCLCPP_INFO(get_logger(), " * Local stream input: DISABLED"); + } + } + + RCLCPP_INFO(get_logger(), "=== GENERAL parameters ==="); + + std::string camera_model = "zed2i"; + sl_tools::getParam( + shared_from_this(), "general.camera_model", camera_model, + camera_model); + if (camera_model == "zed") { + mCamUserModel = sl::MODEL::ZED; + } else if (camera_model == "zedm") { + mCamUserModel = sl::MODEL::ZED_M; + } else if (camera_model == "zed2") { + mCamUserModel = sl::MODEL::ZED2; + } else if (camera_model == "zed2i") { + mCamUserModel = sl::MODEL::ZED2i; + 
} else if (camera_model == "zedx") { + mCamUserModel = sl::MODEL::ZED_X; + if (mSvoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mStreamMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing a network stream from a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mSimMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Simulating a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (!IS_JETSON) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model " << sl::toString(mCamUserModel).c_str() + << " is available only with NVIDIA Jetson devices."); + exit(EXIT_FAILURE); + } + } else if (camera_model == "zedxm") { + mCamUserModel = sl::MODEL::ZED_XM; + if (mSvoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mStreamMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing a network stream from a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mSimMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Simulating a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (!IS_JETSON) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model " << sl::toString(mCamUserModel).c_str() + << " is available only with NVIDIA Jetson devices."); + exit(EXIT_FAILURE); + } + } else if (camera_model == "virtual") { + mCamUserModel = sl::MODEL::VIRTUAL_ZED_X; + + if (mSvoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mStreamMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing a network stream from a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (mSimMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Simulating a " + << sl::toString(mCamUserModel) + << " camera model."); + } else if (!IS_JETSON) { + 
RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model " << sl::toString(mCamUserModel).c_str() + << " is available only with NVIDIA Jetson devices."); + exit(EXIT_FAILURE); + } + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model not valid in parameter values: " << camera_model); + } + RCLCPP_INFO_STREAM( + get_logger(), " * Camera model: " << camera_model << " - " + << mCamUserModel); + + sl_tools::getParam( + shared_from_this(), "general.camera_name", mCameraName, + mCameraName, " * Camera name: "); + + if (!mSvoMode) { + if (mCamUserModel == sl::MODEL::VIRTUAL_ZED_X) { + std::string array_param = "[]"; + sl_tools::getParam( + shared_from_this(), "general.virtual_serial_numbers", + array_param, array_param, + " * Virtual Camera SNs: ", false); + + std::string error; + auto serials = sl_tools::parseStringVector_int(array_param, error); + if (serials.size() == 2) { + mCamVirtualSerialNumbers = serials; + } + + array_param = "[]"; + sl_tools::getParam( + shared_from_this(), "general.virtual_camera_ids", + array_param, array_param, + " * Virtual Camera IDs: ", false); + + auto ids = sl_tools::parseStringVector_int(array_param, error); + if (ids.size() == 2) { + mCamVirtualCameraIds = ids; + } + + // With a live virtual stereo camera at least one of "general.virtual_camera_ids" and "general.virtual_serial_numbers" + // must contain two valid values + if (ids.size() != 2 && serials.size() != 2) { + RCLCPP_ERROR( + get_logger(), + "With a Virtual Stereo Camera setup, one of 'general.virtual_serial_numbers' " + "or 'general.virtual_camera_ids' parameters must contain two " + "valid values (Left and Right camera identification)."); + exit(EXIT_FAILURE); + } + } else { + sl_tools::getParam( + shared_from_this(), "general.serial_number", + mCamSerialNumber, mCamSerialNumber, + " * Camera SN: ", false, 0); + sl_tools::getParam( + shared_from_this(), "general.camera_id", mCamId, + mCamId, " * Camera ID: ", false, -1, 256); + } + sl_tools::getParam( + 
shared_from_this(), "general.camera_timeout_sec", + mCamTimeoutSec, mCamTimeoutSec, + " * Camera timeout [sec]: ", false, 0, 9999); + sl_tools::getParam( + shared_from_this(), "general.camera_max_reconnect", + mMaxReconnectTemp, mMaxReconnectTemp, + " * Camera reconnection temptatives: ", false, 0, 9999); + if (mSimMode) { + RCLCPP_INFO( + get_logger(), + "* [Simulation mode] Camera framerate forced to 60 Hz"); + mCamGrabFrameRate = 60; + } else { + sl_tools::getParam( + shared_from_this(), "general.grab_frame_rate", + mCamGrabFrameRate, mCamGrabFrameRate, + " * Camera framerate: ", false, 0, 120); + } + } else { + // Set it to the maximum possible frame rate to avoid problem on the next validations + // This value will be forced to the correct SVO rate after it has been opened. + mCamGrabFrameRate = 120; + } + sl_tools::getParam( + shared_from_this(), "general.gpu_id", mGpuId, mGpuId, + " * GPU ID: ", false, -1, 999); + sl_tools::getParam( + shared_from_this(), "general.async_image_retrieval", + mAsyncImageRetrieval, mAsyncImageRetrieval, + " * Asynchronous image retrieval: "); + + sl_tools::getParam( + shared_from_this(), "general.enable_image_validity_check", + mImageValidityCheck, mImageValidityCheck, + " * Image Validity Check: ", false, 0, 10); + + // TODO(walter) ADD SVO SAVE COMPRESSION PARAMETERS + + if (mSimMode) { + RCLCPP_INFO( + get_logger(), + "* [Simulation mode] Camera resolution forced to 'HD1080'"); + mCamResol = sl::RESOLUTION::HD1080; + } else { + std::string resol = "AUTO"; + sl_tools::getParam( + shared_from_this(), "general.grab_resolution", resol, + resol); + if (resol == "AUTO") { + mCamResol = sl::RESOLUTION::AUTO; + } else if (sl_tools::isZEDX(mCamUserModel)) { + if (resol == "HD1200") { + mCamResol = sl::RESOLUTION::HD1200; + } else if (resol == "HD1080") { + mCamResol = sl::RESOLUTION::HD1080; + } else if (resol == "SVGA") { + mCamResol = sl::RESOLUTION::SVGA; + } else if (resol == "HD1536") { + mCamResol = sl::RESOLUTION::HD1536; +#if 
(ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + } else if (resol == "XVGA") { + mCamResol = sl::RESOLUTION::XVGA; +#endif + } else { + RCLCPP_WARN( + get_logger(), + "Not valid 'general.grab_resolution' value: '%s'. Using " + "'AUTO' setting.", + resol.c_str()); + mCamResol = sl::RESOLUTION::AUTO; + } + } else { + if (resol == "HD2K") { + mCamResol = sl::RESOLUTION::HD2K; + } else if (resol == "HD1080") { + mCamResol = sl::RESOLUTION::HD1080; + } else if (resol == "HD720") { + mCamResol = sl::RESOLUTION::HD720; + } else if (resol == "VGA") { + mCamResol = sl::RESOLUTION::VGA; + } else { + RCLCPP_WARN( + get_logger(), + "Not valid 'general.grab_resolution' value: '%s'. Using " + "'AUTO' setting.", + resol.c_str()); + mCamResol = sl::RESOLUTION::AUTO; + } + } + RCLCPP_INFO_STREAM( + get_logger(), " * Camera resolution: " + << sl::toString(mCamResol).c_str()); + } + + std::string out_resol = "NATIVE"; + sl_tools::getParam( + shared_from_this(), "general.pub_resolution", out_resol, + out_resol); + if (out_resol == toString(PubRes::NATIVE)) { + mPubResolution = PubRes::NATIVE; + } else if (out_resol == toString(PubRes::CUSTOM)) { + mPubResolution = PubRes::CUSTOM; + } else { + RCLCPP_WARN( + get_logger(), + "Not valid 'general.pub_resolution' value: '%s'. 
Using default " + "setting instead.", + out_resol.c_str()); + out_resol = "NATIVE -> Fix the value in YAML!"; + mPubResolution = PubRes::NATIVE; + } + RCLCPP_INFO_STREAM( + get_logger(), + " * Publishing resolution: " << out_resol.c_str()); + + if (mPubResolution == PubRes::CUSTOM) { + sl_tools::getParam( + shared_from_this(), "general.pub_downscale_factor", + mCustomDownscaleFactor, mCustomDownscaleFactor, + " * Publishing downscale factor: ", false, 1.0, 5.0); + } else { + mCustomDownscaleFactor = 1.0; + } + + sl_tools::getParam( + shared_from_this(), "general.optional_opencv_calibration_file", + mOpencvCalibFile, mOpencvCalibFile, " * OpenCV custom calibration: "); + + sl_tools::getParam( + shared_from_this(), "general.self_calib", mCameraSelfCalib, + mCameraSelfCalib, " * Camera self calibration: "); + + sl_tools::getParam( + shared_from_this(), "general.camera_flip", mCameraFlip, + mCameraFlip, " * Camera flip: "); + + // Dynamic parameters + + if (mSimMode) { + RCLCPP_INFO( + get_logger(), + "* [Simulation mode] Publish framerate forced to 60 Hz"); + mVdPubRate = 60; + } + + if (mSvoMode && !mSvoRealtime) { + RCLCPP_INFO( + get_logger(), + " * [SVO mode - not realtime] Publish framerate forced to SVO Playback rate"); + mVdPubRate = 0; + } else { + sl_tools::getParam( + shared_from_this(), "general.pub_frame_rate", mVdPubRate, + mVdPubRate, " * Publish framerate [Hz]: ", true, -1.0, + static_cast(mCamGrabFrameRate)); + if (mVdPubRate <= 0.0) { + mVdPubRate = static_cast(mCamGrabFrameRate); + } + } + sl_tools::getParam( + shared_from_this(), "general.grab_compute_capping_fps", + mGrabComputeCappingFps, mGrabComputeCappingFps, + " * Grab Compute Capping FPS: ", false, 0.0, + static_cast(mCamGrabFrameRate)); +} + +void ZedCamera::getSvoParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== SVO INPUT parameters ==="); + + sl_tools::getParam( + shared_from_this(), "svo.svo_path", std::string(), + mSvoFilepath); + if 
(mSvoFilepath.compare("live") == 0) { + mSvoFilepath = ""; + } + + if (mSvoFilepath == "") { + mSvoMode = false; + RCLCPP_INFO_STREAM(get_logger(), " * SVO input: DISABLED"); + } else { + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO: '" << mSvoFilepath.c_str() << "'"); + mSvoMode = true; + sl_tools::getParam( + shared_from_this(), "svo.use_svo_timestamps", + mUseSvoTimestamp, mUseSvoTimestamp, + " * Use SVO timestamp: "); + if (mUseSvoTimestamp) { + sl_tools::getParam( + shared_from_this(), "svo.publish_svo_clock", + mPublishSvoClock, mPublishSvoClock, + " * Publish SVO timestamp: "); + } + + sl_tools::getParam(shared_from_this(), "svo.svo_loop", mSvoLoop, mSvoLoop); + if (mUseSvoTimestamp) { + if (mSvoLoop) { + RCLCPP_WARN( + get_logger(), + "SVO Loop is not supported when using SVO timestamps. Loop " + "playback disabled."); + mSvoLoop = false; + } + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO Loop: " << (mSvoLoop ? "TRUE" : "FALSE")); + } + sl_tools::getParam( + shared_from_this(), "svo.svo_realtime", mSvoRealtime, + mSvoRealtime, " * SVO Realtime: "); + + sl_tools::getParam( + shared_from_this(), "svo.play_from_frame", + mSvoFrameStart, mSvoFrameStart, + " * SVO start frame: ", false, 0); + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + sl_tools::getParam( + shared_from_this(), "svo.decryption_key", std::string(), + mSvoDecryptionKey); +#endif + + if (!mSvoRealtime) { + sl_tools::getParam( + shared_from_this(), "svo.replay_rate", mSvoRate, + mSvoRate, " * SVO replay rate: ", true, 0.1, 5.0); + } + } +} + +void ZedCamera::getRoiParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== REGION OF INTEREST parameters ==="); + + sl_tools::getParam( + shared_from_this(), "region_of_interest.automatic_roi", + mAutoRoiEnabled, mAutoRoiEnabled, + " * Automatic ROI generation: "); + + if (mAutoRoiEnabled) { + if (mPosTrkMode 
== sl::POSITIONAL_TRACKING_MODE::GEN_1) { + RCLCPP_WARN_STREAM( + get_logger(), + "Automatic ROI generation with '" + << sl::toString(mPosTrkMode) + << "' is not recommended. Please set the parameter " + "'pos_tracking.pos_tracking_mode' to 'GEN_3' for " + "improved results."); + } + + sl_tools::getParam( + shared_from_this(), "region_of_interest.depth_far_threshold_meters", + mRoiDepthFarThresh, mRoiDepthFarThresh, " * Depth far threshold [m]: "); + sl_tools::getParam( + shared_from_this(), + "region_of_interest.image_height_ratio_cutoff", + mRoiImgHeightRationCutOff, mRoiImgHeightRationCutOff, + " * Image Height Ratio Cut Off: "); + } else { + std::string parsed_str = + this->getParam("region_of_interest.manual_polygon", mRoyPolyParam); + RCLCPP_INFO_STREAM( + get_logger(), + " * Manual ROI polygon: " << parsed_str.c_str()); + } + + if (mRoyPolyParam.size() > 0 || mAutoRoiEnabled) { + mRoiModules.clear(); + bool apply = true; + + sl_tools::getParam( + shared_from_this(), "region_of_interest.apply_to_depth", + apply, apply, " * Apply to depth: "); + if (apply) { + mRoiModules.insert(sl::MODULE::DEPTH); + } + + apply = true; + sl_tools::getParam( + shared_from_this(), + "region_of_interest.apply_to_positional_tracking", apply, + apply, " * Apply to positional tracking: "); + if (apply) { + mRoiModules.insert(sl::MODULE::POSITIONAL_TRACKING); + } + + apply = true; + sl_tools::getParam( + shared_from_this(), + "region_of_interest.apply_to_object_detection", apply, + apply, " * Apply to object detection: "); + if (apply) { + mRoiModules.insert(sl::MODULE::OBJECT_DETECTION); + } + + apply = true; + sl_tools::getParam( + shared_from_this(), + "region_of_interest.apply_to_body_tracking", apply, + apply, " * Apply to body tracking: "); + if (apply) { + mRoiModules.insert(sl::MODULE::BODY_TRACKING); + } + + apply = true; + sl_tools::getParam( + shared_from_this(), + "region_of_interest.apply_to_spatial_mapping", apply, + apply, " * Apply to spatial mapping: "); + if 
(apply) { + mRoiModules.insert(sl::MODULE::SPATIAL_MAPPING); + } + } +} + +void ZedCamera::getSensorsParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== SENSORS STACK parameters ==="); + if (sl_tools::isZED(mCamUserModel)) { + RCLCPP_WARN( + get_logger(), + "!!! SENSORS parameters are not used with ZED !!!"); + return; + } + + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu_tf", + mPublishImuTF, mPublishImuTF, + " * Broadcast IMU TF [not for ZED]: "); + + sl_tools::getParam( + shared_from_this(), "sensors.sensors_image_sync", + mSensCameraSync, mSensCameraSync, + " * Sensors Camera Sync: "); + + sl_tools::getParam( + shared_from_this(), "sensors.sensors_pub_rate", + mSensPubRate, mSensPubRate, " * Sensors publishing rate [Hz]: ", true, 1.0, 400.0); +} + +void ZedCamera::getMappingParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== Spatial Mapping parameters ==="); + + sl_tools::getParam( + shared_from_this(), "mapping.mapping_enabled", + mMappingEnabled, mMappingEnabled, + " * Spatial Mapping Enabled: "); + + sl_tools::getParam( + shared_from_this(), "mapping.resolution", mMappingRes, + mMappingRes, " * Spatial Mapping resolution [m]: "); + sl_tools::getParam( + shared_from_this(), "mapping.max_mapping_range", + mMappingRangeMax, mMappingRangeMax); + if (mMappingRangeMax == -1.0f) { + RCLCPP_INFO(get_logger(), " * 3D Max Mapping range: AUTO"); + } else { + RCLCPP_INFO_STREAM( + get_logger(), + " * 3D Max Mapping range [m]: " << mMappingRangeMax); + } + sl_tools::getParam( + shared_from_this(), "mapping.fused_pointcloud_freq", + mFusedPcPubRate, mFusedPcPubRate, + " * Map publishing rate [Hz]: ", true, -1.0, 30.0); + if (mFusedPcPubRate <= 0.0) { + mFusedPcPubRate = mPcPubRate; + } + + 
mClickedPtTopic = "/clicked_point"; + sl_tools::getParam( + shared_from_this(), "mapping.clicked_point_topic", + mClickedPtTopic, mClickedPtTopic, + " * Clicked point topic: "); + + sl_tools::getParam( + shared_from_this(), "mapping.pd_max_distance_threshold", + mPdMaxDistanceThreshold, mPdMaxDistanceThreshold, + " * Plane Det. Max Dist. Thresh.: ", false, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "mapping.pd_normal_similarity_threshold", + mPdNormalSimilarityThreshold, mPdNormalSimilarityThreshold, + " * Plane Det. Normals Sim. Thresh.: ", false, -180.0, 180.0); +} + +void ZedCamera::getPosTrackingParams() +{ + rclcpp::Parameter paramVal; + std::string paramName; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== POSITIONAL TRACKING parameters ==="); + + mBaseFrameId = mCameraName; + mBaseFrameId += "_camera_link"; + + sl_tools::getParam( + shared_from_this(), "pos_tracking.map_frame", mMapFrameId, + mMapFrameId, " * Map frame id: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.odometry_frame", + mOdomFrameId, mOdomFrameId, " * Odometry frame id: "); + + sl_tools::getParam( + shared_from_this(), "pos_tracking.pos_tracking_enabled", + mPosTrackingEnabled, mPosTrackingEnabled, + " * Positional tracking enabled: "); + + if (mPosTrackingEnabled) { + std::string pos_trk_mode_str = "AUTO"; + sl_tools::getParam( + shared_from_this(), "pos_tracking.pos_tracking_mode", + pos_trk_mode_str, pos_trk_mode_str); + bool matched = false; + bool auto_pt = false; + if (sl_tools::toUpper(pos_trk_mode_str) == "AUTO") { + // Use the SDK's own constructed default, except for 5.2.0 where + // it defaults to GEN_3 which has known issues — force GEN_1 instead +#if (ZED_SDK_MAJOR_VERSION == 5 && ZED_SDK_MINOR_VERSION == 2 && \ + ZED_SDK_PATCH_VERSION == 0) + mPosTrkMode = sl::POSITIONAL_TRACKING_MODE::GEN_1; +#else + sl::PositionalTrackingParameters defaultPtParams; + 
mPosTrkMode = defaultPtParams.mode; +#endif + matched = true; + auto_pt = true; + } else { + matched = sl_tools::matchSdkEnum( + pos_trk_mode_str, sl::POSITIONAL_TRACKING_MODE::GEN_1, + sl::POSITIONAL_TRACKING_MODE::LAST, mPosTrkMode); + } + if (!matched) { +#if (ZED_SDK_MAJOR_VERSION == 5 && ZED_SDK_MINOR_VERSION == 2 && \ + ZED_SDK_PATCH_VERSION == 0) + mPosTrkMode = sl::POSITIONAL_TRACKING_MODE::GEN_1; +#else + sl::PositionalTrackingParameters defaultPtParams; + mPosTrkMode = defaultPtParams.mode; +#endif + RCLCPP_WARN_STREAM( + get_logger(), + "The value of the parameter 'pos_tracking.pos_tracking_mode' is not valid: '" + << pos_trk_mode_str << "'. Using the default value."); + } + RCLCPP_INFO_STREAM( + get_logger(), " * Positional tracking mode" << (auto_pt ? " [AUTO]: " : ": ") << sl::toString( + mPosTrkMode).c_str()); + + sl_tools::getParam( + shared_from_this(), "pos_tracking.publish_tf", mPublishTF, + mPublishTF, " * Broadcast Odometry TF: "); + if (mPublishTF) { + sl_tools::getParam( + shared_from_this(), "pos_tracking.publish_map_tf", + mPublishMapTF, mPublishMapTF, " * Broadcast Pose TF: "); + } else { + mPublishMapTF = false; + } + + sl_tools::getParam( + shared_from_this(), "pos_tracking.depth_min_range", + mPosTrackDepthMinRange, mPosTrackDepthMinRange, + " * Depth minimum range: ", false, 0.0f, 40.0f); + sl_tools::getParam( + shared_from_this(), "pos_tracking.transform_time_offset", + mTfOffset, mTfOffset, " * TF timestamp offset: ", true, + -10.0, 10.0); + sl_tools::getParam( + shared_from_this(), "pos_tracking.path_pub_rate", + mPathPubRate, mPathPubRate, + " * Path publishing rate: ", true, -1.0, 120.0); + if (mPathPubRate <= 0.0) { + mPathPubRate = static_cast(mCamGrabFrameRate); + } + sl_tools::getParam( + shared_from_this(), "pos_tracking.path_max_count", + mPathMaxCount, mPathMaxCount, " * Path history lenght: ", false, -1, 10000); + + paramName = "pos_tracking.initial_base_pose"; + declare_parameter( + paramName, 
rclcpp::ParameterValue(mInitialBasePose), + read_only_descriptor); + if (!get_parameter(paramName, mInitialBasePose)) { + RCLCPP_WARN_STREAM( + get_logger(), + "The parameter '" + << paramName + << "' is not available or is not valid, using the default value"); + mInitialBasePose = std::vector(6, 0.0); + } + if (mInitialBasePose.size() < 6) { + RCLCPP_WARN_STREAM( + get_logger(), + "The parameter '" + << paramName + << "' must be a vector of 6 values of double type"); + mInitialBasePose = std::vector(6, 0.0); + } + RCLCPP_INFO( + get_logger(), " * Initial pose: [%g,%g,%g,%g,%g,%g,]", + mInitialBasePose[0], mInitialBasePose[1], mInitialBasePose[2], + mInitialBasePose[3], mInitialBasePose[4], mInitialBasePose[5]); + + // TODO(Walter) Fix this as soon as the `sl::Fusion` module will support loop + // closure and odometry + if (mGnssFusionEnabled) { + mAreaMemory = false; + RCLCPP_INFO(get_logger(), " * Area Memory: FALSE - Forced by GNSS usage"); + RCLCPP_INFO( + get_logger(), + " Note: loop closure (Area Memory) is disabled when using " + "GNSS fusion"); + } else { + sl_tools::getParam( + shared_from_this(), "pos_tracking.area_memory", + mAreaMemory, mAreaMemory, " * Area Memory: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.area_file_path", + mAreaMemoryFilePath, mAreaMemoryFilePath, + " * Area Memory File: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.enable_localization_only", + mLocalizationOnly, mLocalizationOnly, + " * Enable Localization Only: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.save_area_memory_on_closing", + mSaveAreaMemoryOnClosing, mSaveAreaMemoryOnClosing, + " * Save Area Memory on closing: "); + + if (mAreaMemoryFilePath.empty()) { + if (mSaveAreaMemoryOnClosing) { + RCLCPP_WARN( + get_logger(), + " * The parameter 'pos_tracking.area_file_path' is empty, " + "no Area Memory File will be saved on closing."); + mSaveAreaMemoryOnClosing = false; + } + } else { + mAreaMemoryFilePath = 
sl_tools::getFullFilePath(mAreaMemoryFilePath); + mAreaFileExists = std::filesystem::exists(mAreaMemoryFilePath); + + if (mAreaFileExists) { + RCLCPP_INFO_STREAM( + get_logger(), + " * Using the existing Area Memory file '" << mAreaMemoryFilePath << "'"); + if (mSaveAreaMemoryOnClosing) { + RCLCPP_INFO( + get_logger(), + " * The Area Memory file will be updated on node closing or by manually calling the `save_area_memory` service with empty parameter."); + } + } else { + RCLCPP_INFO_STREAM( + get_logger(), + " * The Area Memory file '" << mAreaMemoryFilePath << "' does not exist."); + if (mSaveAreaMemoryOnClosing) { + RCLCPP_INFO( + get_logger(), + " * The Area Memory file will be created on node closing or by manually calling the `save_area_memory` service with empty parameter."); + } + } + } + + } + sl_tools::getParam( + shared_from_this(), "pos_tracking.set_as_static", + mSetAsStatic, mSetAsStatic, " * Camera is static: "); + + sl_tools::getParam( + shared_from_this(), "pos_tracking.set_gravity_as_origin", + mSetGravityAsOrigin, mSetGravityAsOrigin, + " * Gravity as origin: "); + sl_tools::getParam( + shared_from_this(), "pos_tracking.imu_fusion", mImuFusion, + mImuFusion, " * IMU Fusion: "); + + sl_tools::getParam( + shared_from_this(), "pos_tracking.floor_alignment", + mFloorAlignment, mFloorAlignment, " * Floor Alignment: "); + + sl_tools::getParam( + shared_from_this(), + "pos_tracking.reset_odom_with_loop_closure", + mResetOdomWhenLoopClosure, mResetOdomWhenLoopClosure, + " * Reset Odometry with Loop Closure: "); + + sl_tools::getParam( + shared_from_this(), + "pos_tracking.publish_3d_landmarks", + mPublish3DLandmarks, mPublish3DLandmarks, + " * Publish 3D Landmarks: "); + + sl_tools::getParam( + shared_from_this(), + "pos_tracking.publish_lm_skip_frame", + mPublishLandmarkSkipFrame, mPublishLandmarkSkipFrame, + " * Publish Landmark Skip Frame: "); + + sl_tools::getParam( + shared_from_this(), "pos_tracking.two_d_mode", mTwoDMode, + mTwoDMode, " * 2D mode: 
"); + + if (mTwoDMode) { + sl_tools::getParam( + shared_from_this(), "pos_tracking.fixed_z_value", + mFixedZValue, mFixedZValue, " * Fixed Z value: "); + } + sl_tools::getParam( + shared_from_this(), + "pos_tracking.reset_pose_with_svo_loop", + mResetPoseWithSvoLoop, mResetPoseWithSvoLoop, + " * Reset pose with SVO loop: "); + } else { + mPublishTF = false; + mPublishOdomPose = false; + mPublishPoseCov = false; + mPublishPath = false; + } +} + +void ZedCamera::getGnssFusionParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== GNSS FUSION parameters ==="); + if (sl_tools::isZED(mCamUserModel)) { + RCLCPP_WARN( + get_logger(), + "!!! GNSS FUSION module cannot be enabled with ZED!!!"); + return; + } + + sl_tools::getParam( + shared_from_this(), "gnss_fusion.gnss_fusion_enabled", + mGnssFusionEnabled, mGnssFusionEnabled, + " * GNSS fusion enabled: "); + + if (mGnssFusionEnabled) { + mGnssFrameId = mCameraName + "_gnss_link"; + + sl_tools::getParam( + shared_from_this(), "gnss_fusion.gnss_fix_topic", + mGnssTopic, mGnssTopic, " * GNSS topic name: "); + sl_tools::getParam( + shared_from_this(), + "gnss_fusion.enable_reinitialization", + mGnssEnableReinitialization, mGnssEnableReinitialization, + " * GNSS Reinitialization: "); + sl_tools::getParam( + shared_from_this(), "gnss_fusion.enable_rolling_calibration", + mGnssEnableRollingCalibration, mGnssEnableRollingCalibration, + " * GNSS Rolling Calibration: "); + sl_tools::getParam( + shared_from_this(), + "gnss_fusion.enable_translation_uncertainty_target", + mGnssEnableTranslationUncertaintyTarget, + mGnssEnableTranslationUncertaintyTarget, + " * GNSS Transl. Uncert. Target: "); + sl_tools::getParam( + shared_from_this(), + "gnss_fusion.gnss_vio_reinit_threshold", + mGnssVioReinitThreshold, mGnssVioReinitThreshold, + " * GNSS VIO Reinit. 
Thresh.: "); + sl_tools::getParam( + shared_from_this(), "gnss_fusion.target_translation_uncertainty", + mGnssTargetTranslationUncertainty, mGnssTargetTranslationUncertainty, + " * GNSS Target Transl. Uncert.: "); + sl_tools::getParam( + shared_from_this(), "gnss_fusion.target_yaw_uncertainty", + mGnssTargetYawUncertainty, mGnssTargetYawUncertainty, + " * GNSS Target Yaw Uncert.: "); + sl_tools::getParam( + shared_from_this(), "gnss_fusion.gnss_zero_altitude", + mGnssZeroAltitude, mGnssZeroAltitude, + " * GNSS Zero Altitude: "); + + sl_tools::getParam( + shared_from_this(), "gnss_fusion.h_covariance_mul", + mGnssHcovMul, mGnssHcovMul, + " * Horiz. Covariance mult.: "); + sl_tools::getParam( + shared_from_this(), "gnss_fusion.v_covariance_mul", + mGnssVcovMul, mGnssVcovMul, + " * Vert. Covariance mult.: "); + + sl_tools::getParam( + shared_from_this(), "gnss_fusion.publish_utm_tf", + mPublishUtmTf, mPublishUtmTf, " * Publish UTM TF: "); + + sl_tools::getParam( + shared_from_this(), + "gnss_fusion.broadcast_utm_transform_as_parent_frame", + mUtmAsParent, mUtmAsParent, + " * Publish UTM TF as parent of 'map': "); + } +} + +void ZedCamera::getStreamingServerParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== STREAMING SERVER parameters ==="); + + bool stream_server = false; + sl_tools::getParam( + shared_from_this(), "stream_server.stream_enabled", + stream_server, stream_server, + " * Streaming Server enabled: "); + mStreamingServerRequired = stream_server; + + std::string codec = "H264"; + sl_tools::getParam(shared_from_this(), "stream_server.codec", codec, codec); + if (codec == "H264") { + mStreamingServerCodec = sl::STREAMING_CODEC::H264; + RCLCPP_INFO(get_logger(), " * Stream codec: H264"); + } else if (codec == "H265") { + mStreamingServerCodec = sl::STREAMING_CODEC::H265; + RCLCPP_INFO(get_logger(), " * Stream codec: H265"); + } 
else { + mStreamingServerCodec = sl::STREAMING_CODEC::H264; + RCLCPP_WARN_STREAM( + get_logger(), + "Invalid value for the parameter 'stream_server.codec': " << codec << + ". Using the default value."); + RCLCPP_INFO(get_logger(), " * Stream codec: H264"); + } + + sl_tools::getParam( + shared_from_this(), "stream_server.port", + mStreamingServerPort, mStreamingServerPort, + " * Stream port: ", false, 0, 65535); + + sl_tools::getParam( + shared_from_this(), "stream_server.bitrate", + mStreamingServerBitrate, mStreamingServerBitrate, " * Stream bitrate: ", false, 1000, 60000); + + sl_tools::getParam( + shared_from_this(), "stream_server.gop_size", + mStreamingServerGopSize, mStreamingServerGopSize, " * Stream GOP size: ", false, -1, 256); + + sl_tools::getParam( + shared_from_this(), "stream_server.chunk_size", + mStreamingServerChunckSize, mStreamingServerChunckSize, " * Stream Chunk size: ", false, 1024, + 65000); + + sl_tools::getParam( + shared_from_this(), "stream_server.adaptative_bitrate", + mStreamingServerAdaptiveBitrate, + mStreamingServerAdaptiveBitrate, " * Adaptive bitrate: "); + + sl_tools::getParam( + shared_from_this(), "stream_server.target_framerate", + mStreamingServerTargetFramerate, + mStreamingServerTargetFramerate, " * Target frame rate:", false, 0, 120); +} + +void ZedCamera::getAdvancedParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== ADVANCED parameters ==="); + + sl_tools::getParam( + shared_from_this(), "advanced.change_thread_priority", + mChangeThreadSched, mChangeThreadSched, + " * Change thread priority: "); + + if (mChangeThreadSched) { + mThreadSchedPolicy = "SCHED_OTHER"; + sl_tools::getParam( + shared_from_this(), "advanced.thread_sched_policy", + mThreadSchedPolicy, mThreadSchedPolicy, + " * Thread sched. 
policy: "); + + if (mThreadSchedPolicy == "SCHED_FIFO" || mThreadSchedPolicy == "SCHED_RR") { + if (!sl_tools::checkRoot()) { + RCLCPP_WARN_STREAM( + get_logger(), + "'sudo' permissions required to set " + << mThreadSchedPolicy + << " thread scheduling policy. Using Linux " + "default [SCHED_OTHER]"); + mThreadSchedPolicy = "SCHED_OTHER"; + } else { + sl_tools::getParam( + shared_from_this(), "advanced.thread_grab_priority", + mThreadPrioGrab, mThreadPrioGrab, + " * Grab thread priority: "); + sl_tools::getParam( + shared_from_this(), "advanced.thread_sensor_priority", + mThreadPrioSens, mThreadPrioSens, + " * Sensors thread priority: "); + sl_tools::getParam( + shared_from_this(), + "advanced.thread_pointcloud_priority", + mThreadPrioPointCloud, mThreadPrioPointCloud, + " * Point Cloud thread priority: "); + } + } + } +} + +rcl_interfaces::msg::SetParametersResult ZedCamera::callback_dynamicParamChange( + std::vector parameters) +{ + DEBUG_STREAM_COMM("Parameter change callback"); + + rcl_interfaces::msg::SetParametersResult result; + result.successful = true; + + DEBUG_STREAM_COMM("Modifying " << parameters.size() << " parameters"); + + int count = 0; + + for (const rclcpp::Parameter & param : parameters) { + count++; + + DEBUG_STREAM_COMM("Param #" << count << ": " << param.get_name()); + + + if (param.get_name() == "pos_tracking.transform_time_offset") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + + double val = param.as_double(); + mTfOffset = val; + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " << val); + } else if (param.get_name() == "pos_tracking.path_pub_rate") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if 
(param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + + double val = param.as_double(); + + if (val < -1.0 || val > mCamGrabFrameRate) { + result.successful = false; + result.reason = + param.get_name() + + " must be >= -1 and <= `general.grab_frame_rate` (0 or -1 = no limit)"; + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + if (val <= 0.0) { + val = static_cast(mCamGrabFrameRate); + } + + mPathPubRate = val; + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " << val); + } else if (param.get_name() == "mapping.fused_pointcloud_freq") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + + double val = param.as_double(); + + if (val < -1.0 || val > mPcPubRate) { + result.successful = false; + result.reason = param.get_name() + + " must be >= -1 and <= `point_cloud_freq` (0 or -1 = no limit)"; + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + if (val <= 0.0) { + val = mPcPubRate; + } + + mFusedPcPubRate = val; + startFusedPcTimer(mFusedPcPubRate); // Reset publishing timer + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " << val); + } else if (param.get_name() == "sensors.sensors_pub_rate") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + double value = param.as_double(); + if (value != 
mSensPubRate) { + mSensPubRate = value; + mPubImuTF_sec->setNewSize(static_cast(mSensPubRate)); + mImuPeriodMean_sec->setNewSize(static_cast(mSensPubRate)); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " + << value); + } + } else if (param.get_name() == "svo.replay_rate") { + rclcpp::ParameterType correctType = + rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + break; + } + double value = param.as_double(); + + mSvoRate = value; + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" << param.get_name() + << "' correctly set to " << value); + } + + // ----> Video/Depth dynamic parameters + if (!handleVideoDepthDynamicParams(param, result)) { + break; + } + // <---- Video/Depth dynamic parameters + + // ----> Object Detection dynamic parameters + if (mUsingCustomOd) { + if (!handleCustomOdDynamicParams(param, result)) { + break; + } + } else { + if (!handleOdDynamicParams(param, result)) { + break; + } + } + // <---- Object Detection dynamic parameters + + // ----> Body Tracking dynamica parameters + if (!handleBodyTrkDynamicParams(param, result)) { + break; + } + // <---- Body Tracking dynamica parameters + } + + if (result.successful) { + DEBUG_STREAM_COMM( + "Correctly set " << count << "/" << parameters.size() + << " parameters"); + } else { + DEBUG_STREAM_COMM( + "Correctly set " << count - 1 << "/" + << parameters.size() << " parameters"); + } + + return result; +} + +void ZedCamera::setTFCoordFrameNames() +{ + // ----> Coordinate frames + mCenterFrameId = mCameraName + "_camera_center"; + mLeftCamFrameId = mCameraName + "_left_camera_frame"; + mLeftCamOptFrameId = mCameraName + "_left_camera_frame_optical"; + mRightCamFrameId = mCameraName + "_right_camera_frame"; + mRightCamOptFrameId = mCameraName + "_right_camera_frame_optical"; 
+ + mImuFrameId = mCameraName + "_imu_link"; + mBaroFrameId = mCenterFrameId; // mCameraName + "_baro_link"; + mMagFrameId = mImuFrameId; // mCameraName + "_mag_link"; + mTempLeftFrameId = mLeftCamFrameId; // mCameraName + "_temp_left_link"; + mTempRightFrameId = mRightCamFrameId; // mCameraName + "_temp_right_link"; + + mDepthFrameId = mLeftCamFrameId; + mDepthOptFrameId = mLeftCamOptFrameId; + mPointCloudFrameId = mDepthFrameId; + + // Print TF frames + RCLCPP_INFO_STREAM(get_logger(), "=== TF FRAMES ==="); + RCLCPP_INFO_STREAM(get_logger(), " * Map\t\t\t-> " << mMapFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Odometry\t\t-> " << mOdomFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Base\t\t\t-> " << mBaseFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Camera\t\t-> " << mCenterFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Left\t\t\t-> " << mLeftCamFrameId); + RCLCPP_INFO_STREAM( + get_logger(), + " * Left Optical\t\t-> " << mLeftCamOptFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Right\t\t\t-> " << mRightCamFrameId); + RCLCPP_INFO_STREAM( + get_logger(), + " * Right Optical\t\t-> " << mRightCamOptFrameId); + if (!mDepthDisabled) { + RCLCPP_INFO_STREAM(get_logger(), " * Depth\t\t\t-> " << mDepthFrameId); + RCLCPP_INFO_STREAM( + get_logger(), + " * Depth Optical\t\t-> " << mDepthOptFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Point Cloud\t\t-> " << mPointCloudFrameId); + } + + if (!sl_tools::isZED(mCamRealModel)) { + RCLCPP_INFO_STREAM(get_logger(), " * IMU\t\t\t-> " << mImuFrameId); + + if (sl_tools::isZED2OrZED2i(mCamUserModel)) { + RCLCPP_INFO_STREAM(get_logger(), " * Barometer\t\t-> " << mBaroFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Magnetometer\t\t-> " << mMagFrameId); + RCLCPP_INFO_STREAM( + get_logger(), + " * Left Temperature\t-> " << mTempLeftFrameId); + RCLCPP_INFO_STREAM( + get_logger(), + " * Right Temperature\t-> " << mTempRightFrameId); + } + } + // <---- Coordinate frames +} + +void ZedCamera::initPublishers() +{ + 
RCLCPP_INFO(get_logger(), "=== PUBLISHED TOPICS ==="); + + // ----> Topics names definition + mPointcloudFusedTopic = mTopicRoot + "mapping/fused_cloud"; + + std::string object_det_topic_root = "obj_det"; + mObjectDetTopic = mTopicRoot + object_det_topic_root + "/objects"; + + std::string body_trk_topic_root = "body_trk"; + mBodyTrkTopic = mTopicRoot + body_trk_topic_root + "/skeletons"; + + // Set the positional tracking topic names + mPoseTopic = mTopicRoot + "pose"; + mPoseStatusTopic = mPoseTopic + "/status"; + mPoseCovTopic = mPoseTopic + "_with_covariance"; + mGnssPoseTopic = mPoseTopic + "/filtered"; + mGnssPoseStatusTopic = mGnssPoseTopic + "/status"; + mGeoPoseTopic = mTopicRoot + "geo_pose"; + mGeoPoseStatusTopic = mGeoPoseTopic + "/status"; + mFusedFixTopic = mPoseTopic + "/fused_fix"; + mOriginFixTopic = mPoseTopic + "/origin_fix"; + + mPointcloud3DLandmarksTopic = mPoseTopic + "/landmarks"; + + mOdomTopic = mTopicRoot + "odom"; + mOdomPathTopic = mTopicRoot + "path_odom"; + mPosePathTopic = mTopicRoot + "path_map"; + + // Set the Sensors topic names + std::string temp_topic_root = "temperature"; + std::string imuTopicRoot = "imu"; + std::string imu_topic_name = "data"; + std::string imu_topic_raw_name = "data_raw"; + std::string imu_topic_mag_name = "mag"; + // std::string imu_topic_mag_raw_name = "mag_raw"; + std::string pressure_topic_name = "atm_press"; + + std::string imu_topic = mTopicRoot + imuTopicRoot + "/" + imu_topic_name; + std::string imu_topic_raw = + mTopicRoot + imuTopicRoot + "/" + imu_topic_raw_name; + std::string imu_temp_topic = + mTopicRoot + temp_topic_root + "/" + imuTopicRoot; + std::string imu_mag_topic = + mTopicRoot + imuTopicRoot + "/" + imu_topic_mag_name; + // std::string imu_mag_topic_raw = imuTopicRoot + "/" + + // imu_topic_mag_raw_name; + std::string pressure_topic = + mTopicRoot + /*imuTopicRoot + "/" +*/ pressure_topic_name; + std::string temp_topic_left = mTopicRoot + temp_topic_root + "/left"; + std::string 
temp_topic_right = mTopicRoot + temp_topic_root + "/right"; + + // Status topic name + std::string status_root = mTopicRoot + "status/"; + std::string svo_status_topic = status_root + "svo"; + std::string health_status_topic = status_root + "health"; + std::string heartbeat_topic = status_root + "heartbeat"; + // <---- Topics names definition + + // ----> SVO Status publisher + if (mSvoMode) { + if (mPublishStatus) { + mPubSvoStatus = create_publisher( + svo_status_topic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubSvoStatus->get_topic_name()); + } + if (mUseSvoTimestamp && mPublishSvoClock) { + auto clock_qos = rclcpp::ClockQoS(); + clock_qos.reliability(rclcpp::ReliabilityPolicy::Reliable); // REQUIRED + mPubClock = + create_publisher("/clock", clock_qos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubClock->get_topic_name()); + } + } + // <---- SVO Status publisher + + if (mPublishStatus) { + // ----> Health Status publisher + mPubHealthStatus = create_publisher( + health_status_topic, + mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubHealthStatus->get_topic_name()); + // <---- Health Status publisher + + // ----> Heartbeat Status publisher + mPubHeartbeatStatus = create_publisher( + heartbeat_topic, + mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubHeartbeatStatus->get_topic_name()); + // <---- Heartbeat Status publisher + } + + initVideoDepthPublishers(); + + // POS. 
TRACKING and GNSS FUSION parameters +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (!mDepthDisabled) { +#endif + // ----> Pos Tracking + if (mPublishOdomPose) { + mPubPose = create_publisher( + mPoseTopic, + mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubPose->get_topic_name()); + if (mPublishPoseCov) { + mPubPoseCov = + create_publisher( + mPoseCovTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " << mPubPoseCov->get_topic_name()); + } + if (mPublishStatus) { + mPubPoseStatus = create_publisher( + mPoseStatusTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubPoseStatus->get_topic_name()); + } + mPubOdom = + create_publisher(mOdomTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubOdom->get_topic_name()); + + if (mPublishPath) { + mPubPosePath = + create_publisher(mPosePathTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubPosePath->get_topic_name()); + mPubOdomPath = + create_publisher(mOdomPathTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubOdomPath->get_topic_name()); + } + if (mPublish3DLandmarks) { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + mPub3DLandmarks = point_cloud_transport::create_publisher( + shared_from_this(), mPointcloud3DLandmarksTopic, mQos.get_rmw_qos_profile(), + mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPub3DLandmarks.getTopic()); +#else + mPub3DLandmarks = create_publisher( + mPointcloud3DLandmarksTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPub3DLandmarks->get_topic_name()); +#endif + } + } + if 
(mGnssFusionEnabled) { + mPubGnssPose = create_publisher( + mGnssPoseTopic, + mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic (GNSS): " + << mPubGnssPose->get_topic_name()); + mPubGnssPoseStatus = create_publisher( + mGnssPoseStatusTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubGnssPoseStatus->get_topic_name()); + mPubGeoPose = create_publisher( + mGeoPoseTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic (GNSS): " + << mPubGeoPose->get_topic_name()); + mPubGeoPoseStatus = create_publisher( + mGeoPoseStatusTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + "Advertised on topic (GNSS): " + << mPubGeoPoseStatus->get_topic_name()); + mPubFusedFix = create_publisher( + mFusedFixTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic (GNSS): " + << mPubFusedFix->get_topic_name()); + + mPubOriginFix = create_publisher( + mOriginFixTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic (GNSS origin): " + << mPubOriginFix->get_topic_name()); + + } + // <---- Pos Tracking + + // ----> Mapping + if (mMappingEnabled) { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + mPubFusedCloud = point_cloud_transport::create_publisher( + shared_from_this(), mPointcloudFusedTopic, mQos.get_rmw_qos_profile(), + mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPubFusedCloud.getTopic() + << " @ " << mFusedPcPubRate + << " Hz"); +#else + mPubFusedCloud = create_publisher( + mPointcloudFusedTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPubFusedCloud->get_topic_name() + << " @ " << mFusedPcPubRate + << " Hz"); +#endif + } + + if (mPublishDetPlane) { + std::string marker_topic = mTopicRoot + "plane_marker"; + std::string plane_topic = mTopicRoot + "plane"; + // Rviz markers publisher + mPubMarker = create_publisher( + marker_topic, mQos, mPubOpt); + 
RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubMarker->get_topic_name()); + // Detected planes publisher + mPubPlane = create_publisher( + plane_topic, mQos, + mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubPlane->get_topic_name()); + } + // <---- Mapping + } + + // ----> Sensors + if (!sl_tools::isZED(mCamRealModel)) { + if (mPublishSensImu) { + mPubImu = create_publisher(imu_topic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubImu->get_topic_name()); + } + if (mPublishSensImuRaw) { + mPubImuRaw = + create_publisher(imu_topic_raw, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubImuRaw->get_topic_name()); + } + + if (sl_tools::isZED2OrZED2i(mCamRealModel) || + sl_tools::isZEDX(mCamRealModel)) + { + if (mPublishSensTemp) { + mPubImuTemp = create_publisher( + imu_temp_topic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubImuTemp->get_topic_name()); + } + } + + if (sl_tools::isZED2OrZED2i(mCamRealModel)) { + if (mPublishSensMag) { + mPubImuMag = create_publisher( + imu_mag_topic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubImuMag->get_topic_name()); + } + if (mPublishSensBaro) { + mPubPressure = create_publisher( + pressure_topic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubPressure->get_topic_name()); + } + if (mPublishSensTemp) { + mPubTempL = create_publisher( + temp_topic_left, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " << mPubTempL->get_topic_name()); + mPubTempR = create_publisher( + temp_topic_right, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " << mPubTempR->get_topic_name()); + } + } + + // ----> Camera/imu transform message + if (mPublishSensImuTransf) { + std::string cam_imu_tr_topic = mTopicRoot + 
"left_cam_imu_transform"; + auto qos = mQos; + if (!mUsingIPC) { + // Use TRANSIENT_LOCAL durability if not using intra-process comms + qos = qos.durability(rclcpp::DurabilityPolicy::TransientLocal); + } + mPubCamImuTransf = create_publisher( + cam_imu_tr_topic, qos, mPubOpt); + + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " << mPubCamImuTransf->get_topic_name()); + } + + sl::Orientation sl_rot = mSlCamImuTransf.getOrientation(); + sl::Translation sl_tr = mSlCamImuTransf.getTranslation(); + RCLCPP_INFO( + get_logger(), "Camera-IMU Translation: \n %g %g %g", sl_tr.x, + sl_tr.y, sl_tr.z); + RCLCPP_INFO( + get_logger(), "Camera-IMU Rotation:\n%s", + sl_rot.getRotationMatrix().getInfos().c_str()); + // <---- Camera/imu transform message + } + // <---- Sensors +} + +void ZedCamera::initSubscribers() +{ + RCLCPP_INFO(get_logger(), "=== Subscribers ==="); + // ----> Clicked Point Subscriber + /* From `$ ros2 topic info /clicked_point -v` + QoS profile: + Reliability: RMW_QOS_POLICY_RELIABILITY_RELIABLE + Durability: RMW_QOS_POLICY_DURABILITY_VOLATILE + Lifespan: 2147483651294967295 nanoseconds + Deadline: 2147483651294967295 nanoseconds + Liveliness: RMW_QOS_POLICY_LIVELINESS_AUTOMATIC + Liveliness lease duration: 2147483651294967295 nanoseconds + */ + int sub_count = 0; + + if (!mDepthDisabled && mPosTrackingEnabled) { + mClickedPtSub = create_subscription( + mClickedPtTopic, mQos, + std::bind(&ZedCamera::callback_clickedPoint, this, _1), mSubOpt); + sub_count++; + + RCLCPP_INFO_STREAM( + get_logger(), " * Plane detection: '" + << mClickedPtSub->get_topic_name() + << "'"); + } + // <---- Clicked Point Subscriber + + // ----> GNSS Fix Subscriber + /* From `$ ros2 topic info /fix -v` + QoS profile: + Reliability: RELIABLE + History (Depth): KEEP_LAST (10) + Durability: VOLATILE + Lifespan: Infinite + Deadline: Infinite + Liveliness: AUTOMATIC + Liveliness lease duration: Infinite + */ + if (mGnssFusionEnabled && !mSvoMode) { + mGnssMsgReceived = false; + 
mGnssFixValid = false; + + mGnssFixSub = create_subscription( + mGnssTopic, mQos, std::bind(&ZedCamera::callback_gnssFix, this, _1), + mSubOpt); + sub_count++; + + RCLCPP_INFO_STREAM( + get_logger(), " * GNSS Fix: '" << mGnssFixSub->get_topic_name() << "'"); + } + // <---- GNSS Fix Subscriber + + // ----> Clock Subscriber + /* From `$ ros2 topic info /clock -v` + + QoS profile: + Reliability: RELIABLE + History (Depth): KEEP_LAST (10) + Durability: VOLATILE + Lifespan: Infinite + Deadline: Infinite + Liveliness: AUTOMATIC + Liveliness lease duration: Infinite + */ + + if (mUseSimTime) { + mClockAvailable = false; + + mClockSub = create_subscription( + "/clock", mQos, std::bind(&ZedCamera::callback_clock, this, _1), + mSubOpt); + sub_count++; + + RCLCPP_INFO_STREAM( + get_logger(), + " * Sim Clock: '" << mClockSub->get_topic_name() << "'"); + } + + if (sub_count == 0) { + RCLCPP_INFO_STREAM(get_logger(), " * No subscribers"); + } +} + +bool ZedCamera::startCamera() +{ + RCLCPP_INFO(get_logger(), "=== STARTING CAMERA ==="); + + // // CUDA context check + // CUcontext * primary_cuda_context; + // cuCtxGetCurrent(primary_cuda_context); + // int ctx_gpu_id; + // cudaGetDevice(&ctx_gpu_id); + + // Create a ZED object + mZed = std::make_shared(); + + // ----> SDK version + RCLCPP_INFO( + get_logger(), "ZED SDK Version: %d.%d.%d - Build %s", + ZED_SDK_MAJOR_VERSION, ZED_SDK_MINOR_VERSION, + ZED_SDK_PATCH_VERSION, ZED_SDK_BUILD_ID); + // <---- SDK version + + // ----> TF2 Transform + mTfBuffer = std::make_unique(get_clock()); + mTfListener = std::make_unique( + *mTfBuffer); // Start TF Listener thread + mTfBroadcaster = std::make_unique(this); + + mStaticTfPublished = false; + mStaticImuTfPublished = false; + if (!mUsingIPC) { + mStaticTfBroadcaster = + std::make_unique(this); + } else { // Cannot use TRANSIENT_LOCAL with intra-process comms + mStaticTfBroadcaster.reset(); + } + // <---- TF2 Transform + + // ----> ZED configuration + + // if (primary_cuda_context) { + // 
mInitParams.sdk_cuda_ctx = *primary_cuda_context; + // } else { + // RCLCPP_INFO( + // get_logger(), + // "No ready CUDA context found, using default ZED SDK context."); + // } + + if (mSimMode) { // Simulation? + RCLCPP_INFO_STREAM( + get_logger(), "=== CONNECTING TO THE SIMULATION SERVER [" + << mSimAddr.c_str() << ":" << mSimPort + << "] ==="); + + mInitParams.input.setFromStream(mSimAddr.c_str(), mSimPort); + } else if (!mSvoFilepath.empty()) { + RCLCPP_INFO(get_logger(), "=== SVO OPENING ==="); + + mInitParams.input.setFromSVOFile(mSvoFilepath.c_str()); + mInitParams.svo_real_time_mode = mSvoRealtime; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + mInitParams.svo_decryption_key = mSvoDecryptionKey.c_str(); +#endif + } else if (!mStreamAddr.empty()) { + RCLCPP_INFO(get_logger(), "=== LOCAL STREAMING OPENING ==="); + + mInitParams.input.setFromStream( + mStreamAddr.c_str(), + static_cast(mStreamPort)); + } else { + RCLCPP_INFO(get_logger(), "=== CAMERA OPENING ==="); + + mInitParams.camera_fps = mCamGrabFrameRate; + mInitParams.grab_compute_capping_fps = static_cast(mGrabComputeCappingFps); + mInitParams.camera_resolution = static_cast(mCamResol); + mInitParams.async_image_retrieval = mAsyncImageRetrieval; + mInitParams.enable_image_validity_check = mImageValidityCheck; + + if (mCamUserModel == sl::MODEL::VIRTUAL_ZED_X) { + if (mCamVirtualSerialNumbers.size() == 2) { + // Generate the virtual serial number from the two real serial numbers + auto virtual_sn = + sl::generateVirtualStereoSerialNumber( + mCamVirtualSerialNumbers[0], + mCamVirtualSerialNumbers[1]); + mInitParams.input.setVirtualStereoFromSerialNumbers( + mCamVirtualSerialNumbers[0], mCamVirtualSerialNumbers[1], virtual_sn); + } else if (mCamVirtualCameraIds.size() == 2) { + + // Here we need the ZED X One serial numbers to generate the virtual camera SN + auto cams = sl::CameraOne::getDeviceList(); + std::vector serials; + for (const auto & cam : cams) { + if (std::any_of( + 
mCamVirtualCameraIds.begin(), mCamVirtualCameraIds.end(), + [&cam](int id) {return cam.id == id;})) + { + serials.push_back(cam.serial_number); + } + } + + if (serials.size() < 2) { + RCLCPP_ERROR( + get_logger(), + "To use VIRTUAL_ZED_X model with camera IDs, the cameras must be connected and recognized by the system."); + return false; + } + + // Generate the virtual serial number from the two real serial numbers + auto virtual_sn = sl::generateVirtualStereoSerialNumber(serials[0], serials[1]); + + mInitParams.input.setVirtualStereoFromCameraIDs( + mCamVirtualCameraIds[0], mCamVirtualCameraIds[1], virtual_sn); + } else { + RCLCPP_ERROR( + get_logger(), + "To use VIRTUAL_ZED_X model, you must provide either two VALID serial numbers or two VALID camera IDs."); + return false; + } + } else { + if (mCamSerialNumber > 0) { + mInitParams.input.setFromSerialNumber(mCamSerialNumber); + } else if (mCamId >= 0) { + mInitParams.input.setFromCameraID(mCamId); + } + } + } + + mInitParams.coordinate_system = ROS_COORDINATE_SYSTEM; + mInitParams.coordinate_units = ROS_MEAS_UNITS; + mInitParams.depth_mode = mDepthMode; + + // Set env var for custom depth model override if specified + if (!mDepthModelOverride.empty()) { +#if (ZED_SDK_MAJOR_VERSION < 5) || \ + (ZED_SDK_MAJOR_VERSION == 5 && ZED_SDK_MINOR_VERSION < 2) || \ + (ZED_SDK_MAJOR_VERSION == 5 && ZED_SDK_MINOR_VERSION == 2 && ZED_SDK_PATCH_VERSION < 2) + RCLCPP_WARN( + get_logger(), + "*** Depth model override requires ZED SDK >= 5.2.2. " + "Current SDK version is %d.%d.%d. The override will likely be ignored. 
***", + ZED_SDK_MAJOR_VERSION, ZED_SDK_MINOR_VERSION, ZED_SDK_PATCH_VERSION); +#endif + std::string depth_mode_name = sl_tools::toUpper( + std::string(sl::toString(mDepthMode).c_str())); + std::string env_var = "ZED_SDK_OVERRIDE_" + depth_mode_name; + setenv(env_var.c_str(), mDepthModelOverride.c_str(), 1); + RCLCPP_WARN_STREAM( + get_logger(), + "*** DEPTH MODEL OVERRIDE ACTIVE ***\n" + << " Mode: " << sl::toString(mDepthMode).c_str() << "\n" + << " Model: " << mDepthModelOverride << "\n" + << " Env var: " << env_var << "=" << mDepthModelOverride); + } + + mInitParams.sdk_verbose = mVerbose; + mInitParams.sdk_verbose_log_file = mVerboseLogFile.c_str(); + mInitParams.sdk_gpu_id = mGpuId; + if (mDepthStabilization >= 0) { + mInitParams.depth_stabilization = mDepthStabilization; + } // else: keep SDK constructed default + mInitParams.camera_image_flip = (mCameraFlip ? sl::FLIP_MODE::ON : sl::FLIP_MODE::OFF); + mInitParams.depth_minimum_distance = mCamMinDepth; + mInitParams.depth_maximum_distance = mCamMaxDepth; + + if (!mOpencvCalibFile.empty()) { + mInitParams.optional_opencv_calibration_file = mOpencvCalibFile.c_str(); + } + + mInitParams.camera_disable_self_calib = !mCameraSelfCalib; + mInitParams.enable_image_enhancement = true; + mInitParams.enable_right_side_measure = false; + + mInitParams.async_grab_camera_recovery = + true; // Camera recovery is handled asynchronously to provide information + // about this status + + // Set the maximum working resolution between video and point cloud to boost the pipeline processing + if (mMatResol.width > mPcResol.width) { + mInitParams.maximum_working_resolution = mMatResol; + } else { + mInitParams.maximum_working_resolution = mPcResol; + } + // <---- ZED configuration + + // ----> Try to connect to a camera, to a stream, or to load an SVO + sl_tools::StopWatch connectTimer(get_clock()); + + mThreadStop = false; + mGrabStatus = sl::ERROR_CODE::LAST; + + while (1) { + rclcpp::sleep_for(500ms); + + mConnStatus = 
mZed->open(mInitParams); + + if (mConnStatus == sl::ERROR_CODE::SUCCESS) { + DEBUG_STREAM_COMM("Opening successfull"); + mUptimer.tic(); // Sets the beginning of the camera connection time + break; + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + if (mConnStatus == sl::ERROR_CODE::DRIVER_FAILURE) { + RCLCPP_ERROR_STREAM( + get_logger(), + "ZED X Driver failure: " + << sl::toVerbose(mConnStatus) + << ". Please verify that the ZED drivers are correctly installed."); + return false; + } +#endif + + if (mConnStatus == sl::ERROR_CODE::INVALID_CALIBRATION_FILE) { + if (mOpencvCalibFile.empty()) { + RCLCPP_ERROR_STREAM(get_logger(), "Calibration file error: " << sl::toVerbose(mConnStatus)); + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "If you are using a custom OpenCV calibration file, please check " + "the correctness of the path of the calibration file " + "in the parameter 'general.optional_opencv_calibration_file': '" + << mOpencvCalibFile << "'."); + RCLCPP_ERROR( + get_logger(), + "If the file exists, it may contain invalid information."); + } + return false; + } + + if (mSvoMode) { + RCLCPP_WARN( + get_logger(), "Error opening SVO: %s", + sl::toString(mConnStatus).c_str()); + return false; + } else if (mSimMode) { + RCLCPP_WARN( + get_logger(), "Error connecting to the simulation server: %s", + sl::toString(mConnStatus).c_str()); + } else { + RCLCPP_WARN( + get_logger(), "Error opening camera: %s", + sl::toString(mConnStatus).c_str()); + if (mConnStatus == sl::ERROR_CODE::CAMERA_DETECTION_ISSUE && + sl_tools::isZEDM(mCamUserModel)) + { + RCLCPP_INFO( + get_logger(), + "Try to flip the USB3 Type-C connector and verify the USB3 " + "connection"); + } else { + RCLCPP_INFO(get_logger(), "Please verify the camera connection"); + } + } + + if (!rclcpp::ok() || mThreadStop) { + RCLCPP_INFO(get_logger(), "ZED activation interrupted by user."); + return false; + } + + if (connectTimer.toc() > mMaxReconnectTemp * mCamTimeoutSec) { + 
RCLCPP_ERROR(get_logger(), "Camera detection timeout"); + return false; + } + + mDiagUpdater.force_update(); + + rclcpp::sleep_for(std::chrono::seconds(mCamTimeoutSec)); + } + // ----> Try to connect to a camera, to a stream, or to load an SVO + + // ----> Set SVO first frame if required + if (mSvoMode && mSvoFrameStart != 0) { + int svo_frames = mZed->getSVONumberOfFrames(); + + if (mSvoFrameStart > svo_frames) { + RCLCPP_ERROR_STREAM( + get_logger(), + "The SVO contains " << svo_frames << " frames. The requested starting frame (" + << mSvoFrameStart << ") is invalid."); + return false; + } + + mZed->setSVOPosition(mSvoFrameStart); + RCLCPP_WARN_STREAM( + get_logger(), + "SVO playing from frame #" << mSvoFrameStart); + } + + + // ----> If SVO and GNSS enabled check that it's a valid SV0 Gen.2 + if (mSvoMode && mGnssFusionEnabled) { + // TODO(Walter) Check SVO version when it's available + + mGnssReplay = std::make_unique(mZed); + if (!mGnssReplay->initialize()) { + RCLCPP_ERROR(get_logger(), "The SVO file does not contain valid GNSS information."); + return false; + } else { + RCLCPP_INFO( + get_logger(), + "GNSS information will be retrieved from the SVO file."); + } + } + // <---- If SVO and GNSS enabled check that it's a valid SV0 Gen.2 + + // ----> If SVO and positional tracking Gen3 check that it's a valid SVO Gen3 + if (mSvoMode && mPosTrackingEnabled && + mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) + { + // TODO(Walter) Check SVO version when it's available + } + // <---- If SVO and positional tracking Gen3 check that it's a valid SVO Gen3 + + // ----> Camera information + sl::CameraInformation camInfo = mZed->getCameraInformation(); + + float realFps = camInfo.camera_configuration.fps; + if (realFps != static_cast(mCamGrabFrameRate)) { + if (!mSvoMode) { + RCLCPP_WARN_STREAM( + get_logger(), + "!!! `general.grab_frame_rate` value is not valid: '" + << mCamGrabFrameRate + << "'. Automatically replaced with '" << realFps + << "'. 
Please fix the parameter !!!"); + } + mCamGrabFrameRate = realFps; + + // ----> Check publishing rates + if (mVdPubRate > mCamGrabFrameRate) { + mVdPubRate = mCamGrabFrameRate; + RCLCPP_WARN_STREAM( + get_logger(), + "Video/Depth publishing rate was too high [" << mVdPubRate << "], capped to real grab rate: " << + mCamGrabFrameRate); + } + if (mPcPubRate > mCamGrabFrameRate) { + mPcPubRate = mCamGrabFrameRate; + RCLCPP_WARN_STREAM( + get_logger(), + "PointCloud publishing rate was too high [" + << mPcPubRate << "], capped to real grab rate: " + << mCamGrabFrameRate); + } + // <---- Check publishing rates + } + if (mSvoMode && !mSvoRealtime) { + mVdPubRate = static_cast(mCamGrabFrameRate) * mSvoRate; + } + + // CUdevice devid; + cuCtxGetDevice(&mGpuId); + RCLCPP_INFO_STREAM(get_logger(), " * ZED SDK running on GPU #" << mGpuId); + + // Camera model + mCamRealModel = camInfo.camera_model; + + if (mCamRealModel == sl::MODEL::ZED) { + if (mCamUserModel != sl::MODEL::ZED) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zed'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_M) { + if (mCamUserModel != sl::MODEL::ZED_M) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zedm'"); + } + } else if (mCamRealModel == sl::MODEL::ZED2) { + if (mCamUserModel != sl::MODEL::ZED2) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zed2'"); + } + } else if (mCamRealModel == sl::MODEL::ZED2i) { + if (mCamUserModel != sl::MODEL::ZED2i) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. 
Please modify " + "the value of the parameter 'general.camera_model' to 'zed2i'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_X) { + if (mCamUserModel != sl::MODEL::ZED_X) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zedx'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_XM) { + if (mCamUserModel != sl::MODEL::ZED_XM) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zedxm'"); + } + } else if (mCamRealModel == sl::MODEL::VIRTUAL_ZED_X) { + if (mCamUserModel != sl::MODEL::VIRTUAL_ZED_X) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'virtual'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_X_HDR) { + if (mCamUserModel != sl::MODEL::ZED_X_HDR) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zedxhdr'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_X_HDR_MINI) { + if (mCamUserModel != sl::MODEL::ZED_X_HDR_MINI) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. Please modify " + "the value of the parameter 'general.camera_model' to 'zedxhdrmini'"); + } + } else if (mCamRealModel == sl::MODEL::ZED_X_HDR_MAX) { + if (mCamUserModel != sl::MODEL::ZED_X_HDR_MAX) { + RCLCPP_WARN( + get_logger(), + "Camera model does not match user parameter. 
Please modify " + "the value of the parameter 'general.camera_model' to 'zedxhdrmax'"); + } + } + + RCLCPP_INFO_STREAM( + get_logger(), " * Camera Model -> " + << sl::toString(mCamRealModel).c_str()); + mCamSerialNumber = camInfo.serial_number; + RCLCPP_INFO_STREAM(get_logger(), " * Serial Number -> " << mCamSerialNumber); + + // ----> Update HW ID + std::string hw_id = std::string("Stereolabs "); + hw_id += sl::toString(mCamRealModel).c_str(); + hw_id += " - '" + mCameraName + "'" + " - S/N: " + std::to_string(mCamSerialNumber); + mDiagUpdater.setHardwareID(hw_id); + mDiagUpdater.force_update(); + // <---- Update HW ID + + RCLCPP_INFO_STREAM( + get_logger(), + " * Focal Length\t-> " + << camInfo.camera_configuration.calibration_parameters + .left_cam.focal_length_metric + << " mm"); + + RCLCPP_INFO_STREAM( + get_logger(), + " * Input\t-> " + << sl::toString(mZed->getCameraInformation().input_type).c_str()); + if (mSvoMode) { + #if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 50 + RCLCPP_INFO( + get_logger(), " * SVO resolution -> %dx%d", + mZed->getCameraInformation().camera_configuration.resolution.width, + mZed->getCameraInformation().camera_configuration.resolution.height); + #else + RCLCPP_INFO( + get_logger(), " * SVO resolution -> %ldx%ld", + mZed->getCameraInformation().camera_configuration.resolution.width, + mZed->getCameraInformation().camera_configuration.resolution.height); + #endif + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO framerate\t -> " + << (mZed->getCameraInformation().camera_configuration.fps)); + } + + // Firmwares + if (!mSvoMode) { + mCamFwVersion = camInfo.camera_configuration.firmware_version; + + RCLCPP_INFO_STREAM( + get_logger(), + " * Camera FW Version -> " << mCamFwVersion); + if (!sl_tools::isZED(mCamRealModel)) { + mSensFwVersion = camInfo.sensors_configuration.firmware_version; + RCLCPP_INFO_STREAM( + get_logger(), + " * Sensors FW Version -> " << mSensFwVersion); + } + } + + // Camera/IMU transform + if 
(!sl_tools::isZED(mCamRealModel)) { + mSlCamImuTransf = camInfo.sensors_configuration.camera_imu_transform; + + DEBUG_SENS("Camera-IMU Transform:\n%s", mSlCamImuTransf.getInfos().c_str()); + } + + mCamWidth = camInfo.camera_configuration.resolution.width; + mCamHeight = camInfo.camera_configuration.resolution.height; + + RCLCPP_INFO_STREAM( + get_logger(), " * Camera grab size -> " + << mCamWidth << "x" << mCamHeight); + + int pub_w = static_cast(std::round(mCamWidth / mCustomDownscaleFactor)); + int pub_h = static_cast(std::round(mCamHeight / mCustomDownscaleFactor)); + mMatResol = sl::Resolution(pub_w, pub_h); + + RCLCPP_INFO_STREAM( + get_logger(), " * Color/Depth publishing size -> " + << mMatResol.width << "x" << mMatResol.height); + // <---- Camera information + + // ----> Point Cloud resolution + int pc_w = 0, pc_h = 0; + switch (mPcResolution) { + case PcRes::PUB: // Same as image and depth map + pc_w = pub_w; + pc_h = pub_h; + break; + case PcRes::FULL: + pc_w = NEURAL_W; + pc_h = NEURAL_H; + break; + case PcRes::COMPACT: + pc_w = NEURAL_W / 2; + pc_h = NEURAL_H / 2; + break; + case PcRes::REDUCED: + pc_w = NEURAL_W / 4; + pc_h = NEURAL_H / 4; + break; + } + mPcResol = sl::Resolution(pc_w, pc_h); + + RCLCPP_INFO_STREAM( + get_logger(), " * Point Cloud publishing size -> " + << mPcResol.width << "x" << mPcResol.height); + // <---- Point Cloud resolution1 + + + // ----> Set Region of Interest + if (!mDepthDisabled) { + if (mAutoRoiEnabled) { + RCLCPP_INFO(get_logger(), "=== Enabling Automatic ROI ==="); + + sl::RegionOfInterestParameters roi_param; + roi_param.depth_far_threshold_meters = mRoiDepthFarThresh; + roi_param.image_height_ratio_cutoff = mRoiImgHeightRationCutOff; + roi_param.auto_apply_module = mRoiModules; + + sl::ERROR_CODE err = mZed->startRegionOfInterestAutoDetection(roi_param); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + " * Error while starting automatic ROI generation: " + << sl::toString(err).c_str()); 
+ } else { + RCLCPP_INFO( + get_logger(), + " * Automatic Region of Interest generation started."); + } + } else if (!mRoyPolyParam.empty()) { + RCLCPP_INFO(get_logger(), "=== Setting Manual ROI ==="); + sl::Resolution resol(mCamWidth, mCamHeight); + std::vector sl_poly; + + DEBUG_ROI("Parse ROI Polygon parameter"); + std::string poly_str = parseRoiPoly(mRoyPolyParam, sl_poly); + DEBUG_STREAM_ROI("Parsed ROI Polygon: " << poly_str); + DEBUG_STREAM_ROI(" * Polygon size: " << sl_poly.size()); + + DEBUG_ROI("Create ROI Mask mat"); + sl::Mat roi_mask(resol, sl::MAT_TYPE::U8_C1, sl::MEM::CPU); + + // Create ROI mask + DEBUG_ROI("Generate ROI Mask"); + if (!sl_tools::generateROI(sl_poly, roi_mask)) { + RCLCPP_WARN( + get_logger(), + " * Error generating the manual region of interest image mask."); + } else { + DEBUG_ROI("Enable ROI"); + sl::ERROR_CODE err = mZed->setRegionOfInterest(roi_mask, mRoiModules); + DEBUG_ROI("ROI Enabled"); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + " * Error while setting ZED SDK manual region of interest: " + << sl::toString(err).c_str()); + } else { + RCLCPP_INFO( + get_logger(), + " * Manual Region of Interest correctly set."); + mManualRoiEnabled = true; + } + } + } + } + // <---- Set Region of Interest + + // ----> Check default camera settings + if (_debugCamCtrl && !mSvoMode) { + int value; + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + + if (!sl_tools::isZEDX(mCamRealModel)) { + setting = sl::VIDEO_SETTINGS::BRIGHTNESS; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::CONTRAST; + err = mZed->getCameraSettings(setting, value); + if (err != 
sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::HUE; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + } + + setting = sl::VIDEO_SETTINGS::SATURATION; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::SHARPNESS; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::GAMMA; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::AEC_AGC; + err = 
mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::EXPOSURE; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::GAIN; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::WHITEBALANCE_AUTO; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << ": " << value); + + setting = sl::VIDEO_SETTINGS::WHITEBALANCE_TEMPERATURE; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "Default value for " << sl::toString(setting).c_str() + << 
": " << value); + + if (sl_tools::isZEDX(mCamRealModel)) { + int value_min, value_max; + + setting = sl::VIDEO_SETTINGS::EXPOSURE_TIME; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " + << sl::toString(setting).c_str() << ": " << value); + + setting = sl::VIDEO_SETTINGS::AUTO_EXPOSURE_TIME_RANGE; + err = mZed->getCameraSettings(setting, value_min, value_max); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() << ": " << + sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " << + sl::toString(setting).c_str() << ": [" << value_min << "," << + value_max + << "]"); + + if (!mStreamMode) { + setting = sl::VIDEO_SETTINGS::EXPOSURE_COMPENSATION; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " + << sl::toString(setting).c_str() << ": " << value); + } + + setting = sl::VIDEO_SETTINGS::ANALOG_GAIN; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " + << sl::toString(setting).c_str() << ": " << value); + + setting = sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE; + err = mZed->getCameraSettings(setting, value_min, value_max); + if (err != 
sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() << ": " << + sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " << + sl::toString(setting).c_str() << ": [" << value_min << "," << + value_max + << "]"); + + setting = sl::VIDEO_SETTINGS::DIGITAL_GAIN; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " + << sl::toString(setting).c_str() << ": " << value); + + setting = sl::VIDEO_SETTINGS::AUTO_DIGITAL_GAIN_RANGE; + err = mZed->getCameraSettings(setting, value_min, value_max); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() << ": " << + sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " << + sl::toString(setting).c_str() << ": [" << value_min << "," << + value_max + << "]"); + + if (!mStreamMode) { + setting = sl::VIDEO_SETTINGS::DENOISING; + err = mZed->getCameraSettings(setting, value); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error Getting default param for " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_STREAM_CTRL( + "[ZEDX] Default value for " + << sl::toString(setting).c_str() << ": " << value); + } + } + } + // <----> Check default camera settings + + // ----> Camera Info messages + mLeftCamInfoMsg = std::make_shared(); + mLeftCamInfoRawMsg = std::make_shared(); + mRightCamInfoMsg = std::make_shared(); + mRightCamInfoRawMsg = std::make_shared(); + + setTFCoordFrameNames(); // Requires mZedRealCamModel 
available only after + // camera opening + + fillCamInfo( + mZed, mLeftCamInfoMsg, mRightCamInfoMsg, mLeftCamOptFrameId, + mRightCamOptFrameId); + fillCamInfo( + mZed, mLeftCamInfoRawMsg, mRightCamInfoRawMsg, mLeftCamOptFrameId, + mRightCamOptFrameId, true); + // <---- Camera Info messages + + initPublishers(); // Requires mZedRealCamModel available only after camera + // opening + initSubscribers(); + + // Disable AEC_AGC and Auto Whitebalance to trigger it if user set it to + // automatic + if (!mSvoMode && !mSimMode) { + mZed->setCameraSettings(sl::VIDEO_SETTINGS::AEC_AGC, 0); + mZed->setCameraSettings(sl::VIDEO_SETTINGS::WHITEBALANCE_AUTO, 0); + + // Lock on Positional Tracking mutex to avoid race conditions + std::lock_guard lock(mPtMutex); + + // Force parameters with a dummy grab + mZed->grab(); + } + + // Initialialized timestamp to avoid wrong initial data + // ----> Timestamp + if (mSvoMode) { + if (mUseSvoTimestamp) { + mFrameTimestamp = sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + + DEBUG_COMM("=========================================================*"); + DEBUG_STREAM_COMM("SVO Timestamp\t\t" << mFrameTimestamp.nanoseconds() << " nsec"); + DEBUG_STREAM_COMM( + "Current Timestamp\t" << + sl_tools::slTime2Ros( + mZed->getTimestamp( + sl::TIME_REFERENCE::CURRENT)).nanoseconds() << " nsec"); + DEBUG_COMM("=========================================================*"); + } else { + mFrameTimestamp = + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::CURRENT)); + } + } else if (mSimMode) { + if (mUseSimTime) { + mFrameTimestamp = get_clock()->now(); + } else { + mFrameTimestamp = + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + } else { + mFrameTimestamp = + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + // <---- Timestamp + + // ----> Initialize Diagnostic statistics + mElabPeriodMean_sec = std::make_unique(mCamGrabFrameRate); + mGrabPeriodMean_sec = 
std::make_unique(mCamGrabFrameRate); + mVideoDepthPeriodMean_sec = + std::make_unique(mCamGrabFrameRate); + mVideoDepthElabMean_sec = + std::make_unique(mCamGrabFrameRate); + mPcPeriodMean_sec = std::make_unique(mCamGrabFrameRate); + mPcProcMean_sec = std::make_unique(mCamGrabFrameRate); + mObjDetPeriodMean_sec = std::make_unique(mCamGrabFrameRate); + mObjDetElabMean_sec = std::make_unique(mCamGrabFrameRate); + mBodyTrkPeriodMean_sec = + std::make_unique(mCamGrabFrameRate); + mBodyTrkElabMean_sec = std::make_unique(mCamGrabFrameRate); + mImuPeriodMean_sec = std::make_unique(20); + mBaroPeriodMean_sec = std::make_unique(20); + mMagPeriodMean_sec = std::make_unique(20); + mPubFusedCloudPeriodMean_sec = std::make_unique(mPcPubRate); + mPubOdomTF_sec = std::make_unique(mSensPubRate); + mPubPoseTF_sec = std::make_unique(mSensPubRate); + mPubImuTF_sec = std::make_unique(mSensPubRate); + mGnssFix_sec = std::make_unique(10); + // <---- Initialize Diagnostic statistics + + if (mGnssFusionEnabled) { + DEBUG_GNSS("Initialize Fusion module"); + + // ----> Retrieve GNSS to ZED transform + RCLCPP_INFO(get_logger(), "=== Initialize GNSS Offset ==="); + if (!mGnss2BaseTransfValid) { + getGnss2BaseTransform(); + } + + mGnssAntennaPose[0] = mGnss2BaseTransf.getOrigin().x(); + mGnssAntennaPose[1] = mGnss2BaseTransf.getOrigin().y(); + mGnssAntennaPose[2] = mGnss2BaseTransf.getOrigin().z(); + // <---- Retrieve GNSS to ZED transform + + // ----> Initialize Fusion module + + // Fusion parameters + mFusionInitParams.coordinate_system = ROS_COORDINATE_SYSTEM; + mFusionInitParams.coordinate_units = ROS_MEAS_UNITS; + mFusionInitParams.verbose = mVerbose != 0; + mFusionInitParams.output_performance_metrics = true; + mFusionInitParams.timeout_period_number = 20; + + // Fusion initialization + sl::FUSION_ERROR_CODE fus_err = mFusion.init(mFusionInitParams); + if (fus_err != sl::FUSION_ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error initializing the Fusion module: " + << 
sl::toString(fus_err).c_str() + << "."); + exit(EXIT_FAILURE); + } + DEBUG_GNSS(" Fusion params OK"); + + mFusionConfig = std::make_shared(); + + if (mSimMode) { + // TODO(Walter) Modify when support for streaming input is added in the + // SDK mFusionConfig->input_type.setFromStream(mSimAddr, mSimPort); + mFusionConfig->input_type.setFromSerialNumber(mCamSerialNumber); + mFusionConfig->communication_parameters.setForSharedMemory(); + } else if (mSvoMode) { + mFusionConfig->input_type.setFromSVOFile(mSvoFilepath.c_str()); + mFusionConfig->communication_parameters.setForSharedMemory(); + } else { + mFusionConfig->input_type.setFromSerialNumber(mCamSerialNumber); + mFusionConfig->communication_parameters.setForSharedMemory(); + } + mFusionConfig->serial_number = mCamSerialNumber; + mFusionConfig->pose = sl::Transform::identity(); + + DEBUG_GNSS(" Fusion communication params OK"); + + // Camera identifier + mCamUuid.sn = mCamSerialNumber; + + // Enable camera publishing to Fusion + mZed->startPublishing(mFusionConfig->communication_parameters); + DEBUG_GNSS(" Camera publishing OK"); + + // Fusion subscribe to camera data + fus_err = mFusion.subscribe( + mCamUuid, mFusionConfig->communication_parameters, mFusionConfig->pose); + if (fus_err != sl::FUSION_ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error initializing the Fusion module: " + << sl::toString(fus_err).c_str()); + exit(EXIT_FAILURE); + } + DEBUG_GNSS(" Fusion subscribing OK"); + DEBUG_GNSS("Fusion module ready"); + // <---- Initialize Fusion module + } + + // Init and start threads + initThreads(); + + return true; +} // namespace stereolabs + +void ZedCamera::closeCamera() +{ + std::lock_guard lock(mCloseCameraMutex); + if (mZed == nullptr) { + return; + } + + RCLCPP_INFO(get_logger(), "=== CLOSING CAMERA ==="); + + if (mPosTrackingStarted && !mAreaMemoryFilePath.empty() && + mSaveAreaMemoryOnClosing) + { + DEBUG_STREAM_COMM("Saving area memory on: " << mAreaMemoryFilePath); + 
saveAreaMemoryFile(mAreaMemoryFilePath); + DEBUG_STREAM_COMM("Saved area memory on: " << mAreaMemoryFilePath); + } + + mZed->close(); + mZed.reset(); + RCLCPP_INFO(get_logger(), "=== CAMERA CLOSED ==="); +} + +void ZedCamera::initThreads() +{ + // Start Heartbeat timer + startHeartbeatTimer(); + + // ----> Start CMOS Temperatures thread + if (!mSimMode && !sl_tools::isZED(mCamRealModel) && + !sl_tools::isZEDM(mCamRealModel)) + { + startTempPubTimer(); + } + // <---- Start CMOS Temperatures thread + + // ----> Start Sensors thread if not sync + if (!mSensCameraSync && !sl_tools::isZED(mCamRealModel)) { + mSensThread = std::thread(&ZedCamera::threadFunc_pubSensorsData, this); + } + // <---- Start Sensors thread if not sync + + // ----> Start Video/Depth thread + mVdDataReady = false; + mVdThread = std::thread(&ZedCamera::threadFunc_videoDepthElab, this); + // <---- Start Video/Depth thread + + // ----> Start Pointcloud thread + if (!mDepthDisabled) { + mPcDataReady = false; + mPcThread = std::thread(&ZedCamera::threadFunc_pointcloudElab, this); + } + // <---- Start Pointcloud thread + + // Start grab thread + mGrabThread = std::thread(&ZedCamera::threadFunc_zedGrab, this); +} + +void ZedCamera::startHeartbeatTimer() +{ + if (mHeartbeatTimer != nullptr) { + mHeartbeatTimer->cancel(); + } + + std::chrono::milliseconds pubPeriod_msec(HEARTBEAT_INTERVAL_MS); + mHeartbeatTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCamera::callback_pubHeartbeat, this)); +} + +void ZedCamera::startTempPubTimer() +{ + if (mTempPubTimer != nullptr) { + mTempPubTimer->cancel(); + } + + std::chrono::milliseconds pubPeriod_msec(TEMP_PUB_INTERVAL_MS); + mTempPubTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCamera::callback_pubTemp, this)); +} + +void ZedCamera::startFusedPcTimer(double fusedPcRate) +{ + if (mFusedPcTimer != nullptr) { + mFusedPcTimer->cancel(); + } + + std::chrono::milliseconds pubPeriod_msec( 
+ static_cast(1000.0 / (fusedPcRate))); + mFusedPcTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCamera::callback_pubFusedPc, this)); +} + +void ZedCamera::startPathPubTimer(double pathTimerRate) +{ + if (mPathTimer != nullptr) { + mPathTimer->cancel(); + } + + DEBUG_PT("Starting path pub. timer"); + + if (pathTimerRate > 0) { + std::chrono::milliseconds pubPeriod_msec( + static_cast(1000.0 / (pathTimerRate))); + mPathTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCamera::callback_pubPaths, this)); + + if (mOdomPath.size() == 0 && mPosePath.size() == 0) { + if (mPathMaxCount != -1) { + DEBUG_STREAM_PT("Path vectors reserved " << mPathMaxCount << " poses."); + mOdomPath.reserve(mPathMaxCount); + mPosePath.reserve(mPathMaxCount); + + DEBUG_STREAM_PT( + "Path vector sizes: " << mOdomPath.size() << " " + << mPosePath.size()); + } + } + } else { + mOdomPath.clear(); + mPosePath.clear(); + mPathTimer->cancel(); + RCLCPP_INFO_STREAM( + get_logger(), "Path topics not published -> Pub. 
rate: " + << pathTimerRate << " Hz"); + } +} + +bool ZedCamera::startPosTracking() +{ + // Lock on Positional Tracking mutex to avoid race conditions + std::lock_guard lock(mPtMutex); + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (mDepthDisabled && mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (mDepthDisabled) { +#endif + RCLCPP_WARN( + get_logger(), + "Cannot start Positional Tracking if Depth processing is " + "disabled (except for GEN_3 mode)."); + return false; + } + + if (mZed && mZed->isPositionalTrackingEnabled()) { + if (!mAreaMemoryFilePath.empty() && mSaveAreaMemoryOnClosing) { + mZed->disablePositionalTracking(mAreaMemoryFilePath.c_str()); + RCLCPP_INFO( + get_logger(), + "Area memory updated before restarting the Positional " + "Tracking module."); + } else { + mZed->disablePositionalTracking(); + } + } + + RCLCPP_INFO(get_logger(), "=== Starting Positional Tracking ==="); + + RCLCPP_INFO(get_logger(), " * Waiting for valid static transformations..."); + + bool transformOk = false; + double elapsed = 0.0; + mPosTrackingReady = false; + mGnssInitGood = false; + + // auto start = std::chrono::high_resolution_clock::now(); + + sl_tools::StopWatch stopWatch(get_clock()); + + do { + transformOk = // true; + setPose( + mInitialBasePose[0], mInitialBasePose[1], mInitialBasePose[2], + mInitialBasePose[3], mInitialBasePose[4], + mInitialBasePose[5]); + + elapsed = stopWatch.toc(); + + rclcpp::sleep_for(1ms); + + if (elapsed > 10000) { + RCLCPP_WARN( + get_logger(), + " !!! Failed to get static transforms. Is the " + "'ROBOT STATE PUBLISHER' node correctly " + "working? "); + break; + } + } while (transformOk == false); + + if (transformOk) { + DEBUG_STREAM_PT( + "Time required to get valid static transforms: " + << elapsed / 1000. << " sec"); + } + + RCLCPP_INFO( + get_logger(), + "Initial ZED left camera pose (ZED pos. 
tracking): "); + RCLCPP_INFO( + get_logger(), " * T: [%g,%g,%g]", mInitialPoseSl.getTranslation().x, + mInitialPoseSl.getTranslation().y, mInitialPoseSl.getTranslation().z); + RCLCPP_INFO( + get_logger(), " * Q: [%g,%g,%g,%g]", mInitialPoseSl.getOrientation().ox, + mInitialPoseSl.getOrientation().oy, mInitialPoseSl.getOrientation().oz, + mInitialPoseSl.getOrientation().ow); + + // Tracking parameters + sl::PositionalTrackingParameters ptParams; + + mPoseSmoothing = false; // Always false. Pose Smoothing is to be enabled + // only for VR/AR applications + + ptParams.enable_pose_smoothing = mPoseSmoothing; + ptParams.enable_area_memory = mAreaMemory; + ptParams.area_file_path = + (mAreaFileExists ? mAreaMemoryFilePath.c_str() : ""); + ptParams.enable_localization_only = mLocalizationOnly; + ptParams.enable_imu_fusion = mImuFusion; + ptParams.initial_world_transform = mInitialPoseSl; + ptParams.set_floor_as_origin = mFloorAlignment; + ptParams.depth_min_range = mPosTrackDepthMinRange; + ptParams.set_as_static = mSetAsStatic; + ptParams.set_gravity_as_origin = mSetGravityAsOrigin; + ptParams.mode = mPosTrkMode; + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + if (mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { + ptParams.enable_2d_ground_mode = mTwoDMode; + } else { + ptParams.enable_2d_ground_mode = false; + } +#endif + + if (_debugPosTracking) { + DEBUG_PT(" * Positional Tracking parameters:"); + sl::String json; + ptParams.encode(json); + DEBUG_PT(json.c_str()); + } + + sl::ERROR_CODE err = mZed->enablePositionalTracking(ptParams); + + if (err != sl::ERROR_CODE::SUCCESS) { + mPosTrackingStarted = false; + RCLCPP_WARN( + get_logger(), "Pos. 
Tracking not started: %s", + sl::toString(err).c_str()); + return false; + } + + DEBUG_PT("Positional Tracking started"); + + // ----> Enable Fusion Positional Tracking if required + if (mGnssFusionEnabled && err == sl::ERROR_CODE::SUCCESS) { + mMap2UtmTransfValid = false; + + sl::PositionalTrackingFusionParameters fusion_params; + fusion_params.enable_GNSS_fusion = mGnssFusionEnabled; + + sl::GNSSCalibrationParameters gnss_par; + gnss_par.target_yaw_uncertainty = mGnssTargetYawUncertainty; + gnss_par.enable_translation_uncertainty_target = + mGnssEnableTranslationUncertaintyTarget; + gnss_par.target_translation_uncertainty = mGnssTargetTranslationUncertainty; + gnss_par.enable_reinitialization = mGnssEnableReinitialization; + gnss_par.gnss_vio_reinit_threshold = mGnssVioReinitThreshold; + gnss_par.enable_rolling_calibration = mGnssEnableRollingCalibration; + gnss_par.gnss_antenna_position = mGnssAntennaPose; + + DEBUG_STREAM_GNSS( + "GNSS antenna pose in ZED SDK coordinate: " + << mGnssAntennaPose[0] << "," << mGnssAntennaPose[1] + << "," << mGnssAntennaPose[2]); + + fusion_params.gnss_calibration_parameters = gnss_par; + + sl::FUSION_ERROR_CODE fus_err = + mFusion.enablePositionalTracking(fusion_params); + + if (fus_err != sl::FUSION_ERROR_CODE::SUCCESS) { + mPosTrackingStarted = false; + RCLCPP_WARN( + get_logger(), "Fusion Pos. 
Tracking not started: %s", + sl::toString(fus_err).c_str()); + mZed->disablePositionalTracking(); + return false; + } + DEBUG_GNSS("Fusion Positional Tracking started"); + } + // <---- Enable Fusion Positional Tracking if required + + mPoseLocked = false; + mPoseLockCount = 0; + mPosTrackingStarted = true; + + startPathPubTimer(mPathPubRate); + + return mPosTrackingStarted; +} + +bool ZedCamera::saveAreaMemoryFile(const std::string & filePath) +{ + if (!mZed) { + RCLCPP_WARN(get_logger(), "ZED camera is not initialized"); + return false; + } + + if (!mAreaMemory) { + RCLCPP_WARN( + get_logger(), + "Failed to save area memory: 'Area Memory was not enabled'"); + return false; + } + + RCLCPP_INFO_STREAM(get_logger(), "Saving area memory to: '" << filePath << "' ..."); + sl::ERROR_CODE err = mZed->saveAreaMap(filePath.c_str()); + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Failed to save area memory: '" + << sl::toString(err) << "'"); + return false; + } + + auto export_state = sl::AREA_EXPORTING_STATE::RUNNING; + while (export_state == sl::AREA_EXPORTING_STATE::RUNNING) { + export_state = mZed->getAreaExportState(); + sl::sleep_ms(5); + } + + if (export_state != sl::AREA_EXPORTING_STATE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Failed to save area memory: '" + << sl::toString(export_state) << "'"); + return false; + } + + RCLCPP_INFO(get_logger(), "... Area memory saved successfully"); + return true; +} + +bool ZedCamera::start3dMapping() +{ + DEBUG_MAP("start3dMapping"); + if (mDepthDisabled) { + RCLCPP_WARN( + get_logger(), + "Cannot start 3D Mapping if Depth processing is disabled"); + return false; + } + + if (mSpatialMappingRunning) { + RCLCPP_WARN( + get_logger(), + "Cannot start 3D Mapping. 
The module is already running!"); + return false; + } + + bool required = mMappingEnabled; + + if (!required) { + return false; + } + + RCLCPP_INFO_STREAM(get_logger(), "=== Starting Spatial Mapping ==="); + + sl::SpatialMappingParameters params; + params.map_type = + sl::SpatialMappingParameters::SPATIAL_MAP_TYPE::FUSED_POINT_CLOUD; + params.use_chunk_only = true; + + sl::SpatialMappingParameters spMapPar; + + float lRes = spMapPar.allowed_resolution.first; + float hRes = spMapPar.allowed_resolution.second; + + if (mMappingRes < lRes) { + RCLCPP_WARN_STREAM( + get_logger(), + "'mapping.resolution' value (" + << mMappingRes + << " m) is lower than the allowed resolution " + "values. Fixed automatically"); + mMappingRes = lRes; + } + if (mMappingRes > hRes) { + RCLCPP_WARN_STREAM( + get_logger(), + "'mapping.resolution' value (" + << mMappingRes + << " m) is higher than the allowed resolution " + "values. Fixed automatically"); + mMappingRes = hRes; + } + + params.resolution_meter = mMappingRes; + + float lRng = spMapPar.allowed_range.first; + float hRng = spMapPar.allowed_range.second; + + if (mMappingRangeMax < 0) { + mMappingRangeMax = + sl::SpatialMappingParameters::getRecommendedRange(mMappingRes, *mZed.get()); + RCLCPP_INFO_STREAM( + get_logger(), "Mapping: max range set to " + << mMappingRangeMax + << " m for a resolution of " + << mMappingRes << " m"); + } else if (mMappingRangeMax < lRng) { + RCLCPP_WARN_STREAM( + get_logger(), "'mapping.max_mapping_range_m' value (" + << mMappingRangeMax + << " m) is lower than the allowed " + "resolution values. Fixed " + "automatically"); + mMappingRangeMax = lRng; + } else if (mMappingRangeMax > hRng) { + RCLCPP_WARN_STREAM( + get_logger(), "'mapping.max_mapping_range_m' value (" + << mMappingRangeMax + << " m) is higher than the allowed " + "resolution values. 
Fixed " + "automatically"); + mMappingRangeMax = hRng; + } + + params.range_meter = mMappingRangeMax; + + sl::ERROR_CODE err = mZed->enableSpatialMapping(params); + + if (err == sl::ERROR_CODE::SUCCESS) { + if (mPubFusedCloud == nullptr) { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + mPubFusedCloud = point_cloud_transport::create_publisher( + shared_from_this(), mPointcloudFusedTopic, mQos.get_rmw_qos_profile(), + mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPubFusedCloud.getTopic() + << " @ " << mFusedPcPubRate + << " Hz"); +#else + mPubFusedCloud = create_publisher( + mPointcloudFusedTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), "Advertised on topic " + << mPubFusedCloud->get_topic_name() + << " @ " << mFusedPcPubRate + << " Hz"); +#endif + } + + mSpatialMappingRunning = true; + + startFusedPcTimer(mFusedPcPubRate); + + RCLCPP_INFO_STREAM( + get_logger(), + " * Resolution: " << params.resolution_meter << " m"); + RCLCPP_INFO_STREAM( + get_logger(), + " * Max Mapping Range: " << params.range_meter << " m"); + RCLCPP_INFO_STREAM( + get_logger(), " * Map point cloud publishing rate: " + << mFusedPcPubRate << " Hz"); + + return true; + } else { + mSpatialMappingRunning = false; + if (mFusedPcTimer) { + mFusedPcTimer->cancel(); + } + + RCLCPP_WARN( + get_logger(), "Mapping not activated: %s", + sl::toString(err).c_str()); + + return false; + } +} + +void ZedCamera::stop3dMapping() +{ + if (mFusedPcTimer) { + mFusedPcTimer->cancel(); + } + + mSpatialMappingRunning = false; + mMappingEnabled = false; + + mZed->disableSpatialMapping(); + + RCLCPP_INFO(get_logger(), "=== Spatial Mapping stopped ==="); +} + +bool ZedCamera::startSvoRecording(std::string & errMsg) +{ + sl::RecordingParameters params; + + params.bitrate = mSvoRecBitrate; + params.compression_mode = mSvoRecCompression; + params.target_framerate = mSvoRecFramerate; + params.transcode_streaming_input = mSvoRecTranscode; + params.video_filename = mSvoRecFilename.c_str(); + 
+ sl::ERROR_CODE err = mZed->enableRecording(params); + errMsg = sl::toString(err); + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Error starting SVO recording: " << errMsg); + return false; + } + + mRecording = true; + + return true; +} + +void ZedCamera::stopSvoRecording() +{ + if (mRecording) { + mRecording = false; + mZed->disableRecording(); + } +} + +void ZedCamera::initTransforms() +{ + // According to REP 105 -> http://www.ros.org/reps/rep-0105.html + + // camera_link <- odom <- map + // ^ | + // | | + // --------------------- + + // ----> Dynamic transforms + mOdom2BaseTransf.setIdentity(); // broadcasted if `publish_tf` is true + mMap2OdomTransf.setIdentity(); // broadcasted if `publish_map_tf` is true + mMap2BaseTransf.setIdentity(); // used internally, but not broadcasted + mMap2UtmTransf.setIdentity(); // broadcasted if GNSS Fusion is enabled + // <---- Dynamic transforms +} + +bool ZedCamera::getCamera2BaseTransform() +{ + DEBUG_STREAM_TF( + "Getting static TF from '" << mCenterFrameId.c_str() + << "' to '" << mBaseFrameId.c_str() + << "'"); + + mCamera2BaseTransfValid = false; + + // ----> Static transforms + // Sensor to Base link + try { + // Save the transformation + geometry_msgs::msg::TransformStamped c2b = mTfBuffer->lookupTransform( + mCenterFrameId, mBaseFrameId, TIMEZERO_SYS, rclcpp::Duration(1, 0)); + + // Get the TF2 transformation + // tf2::fromMsg(c2b.transform, mCamera2BaseTransf); + geometry_msgs::msg::Transform in = c2b.transform; + mCamera2BaseTransf.setOrigin( + tf2::Vector3(in.translation.x, in.translation.y, in.translation.z)); + // w at the end in the constructor + mCamera2BaseTransf.setRotation( + tf2::Quaternion( + in.rotation.x, in.rotation.y, in.rotation.z, in.rotation.w)); + + double roll, pitch, yaw; + tf2::Matrix3x3(mCamera2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), + " Static transform Camera Center to Base [%s -> %s]", + 
mCenterFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + mCamera2BaseTransf.getOrigin().x(), + mCamera2BaseTransf.getOrigin().y(), + mCamera2BaseTransf.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", roll * RAD2DEG, + pitch * RAD2DEG, yaw * RAD2DEG); + } catch (tf2::TransformException & ex) { + if (!mCamera2BaseFirstErr) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Transform error: %s", ex.what()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "The tf from '%s' to '%s' is not available.", + mCenterFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Note: one of the possible cause of the problem is the absence of an " + "instance " + "of the `robot_state_publisher` node publishing the correct static " + "TF transformations " + "or a modified URDF not correctly reproducing the ZED " + "TF chain '%s' -> '%s' -> '%s'", + mBaseFrameId.c_str(), mCenterFrameId.c_str(), mDepthFrameId.c_str()); + mCamera2BaseFirstErr = false; + } + + mCamera2BaseTransf.setIdentity(); + return false; + } + // <---- Static transforms + + mCamera2BaseTransfValid = true; + return true; +} + +bool ZedCamera::getSens2CameraTransform() +{ + DEBUG_STREAM_TF( + "Getting static TF from '" + << mDepthFrameId.c_str() << "' to '" << mCenterFrameId.c_str() + << "'"); + + mSensor2CameraTransfValid = false; + + // ----> Static transforms + // Sensor to Camera Center + try { + // Save the transformation + geometry_msgs::msg::TransformStamped s2c = mTfBuffer->lookupTransform( + mDepthFrameId, mCenterFrameId, TIMEZERO_SYS, rclcpp::Duration(1, 0)); + + // Get the TF2 transformation + // tf2::fromMsg(s2c.transform, mSensor2CameraTransf); + geometry_msgs::msg::Transform in = s2c.transform; + mSensor2CameraTransf.setOrigin( + tf2::Vector3(in.translation.x, in.translation.y, 
in.translation.z)); + // w at the end in the constructor + mSensor2CameraTransf.setRotation( + tf2::Quaternion( + in.rotation.x, in.rotation.y, in.rotation.z, in.rotation.w)); + + double roll, pitch, yaw; + tf2::Matrix3x3(mSensor2CameraTransf.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), + " Static transform ref. CMOS Sensor to Camera Center [%s -> %s]", + mDepthFrameId.c_str(), mCenterFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + mSensor2CameraTransf.getOrigin().x(), + mSensor2CameraTransf.getOrigin().y(), + mSensor2CameraTransf.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", roll * RAD2DEG, + pitch * RAD2DEG, yaw * RAD2DEG); + } catch (tf2::TransformException & ex) { + if (!mSensor2CameraTransfFirstErr) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Transform error: %s", ex.what()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "The tf from '%s' to '%s' is not available.", + mDepthFrameId.c_str(), mCenterFrameId.c_str()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Note: one of the possible cause of the problem is the absence of an " + "instance " + "of the `robot_state_publisher` node publishing the correct static " + "TF transformations " + "or a modified URDF not correctly reproducing the ZED " + "TF chain '%s' -> '%s' -> '%s'", + mBaseFrameId.c_str(), mCenterFrameId.c_str(), mDepthFrameId.c_str()); + mSensor2CameraTransfFirstErr = false; + } + + mSensor2CameraTransf.setIdentity(); + return false; + } + // <---- Static transforms + + mSensor2CameraTransfValid = true; + return true; +} + +bool ZedCamera::getSens2BaseTransform() +{ + DEBUG_STREAM_TF( + "Getting static TF from '" << mDepthFrameId.c_str() + << "' to '" << mBaseFrameId.c_str() + << "'"); + + mSensor2BaseTransfValid = false; + + // ----> Static transforms + // Sensor to Base link + try { + // 
Save the transformation + geometry_msgs::msg::TransformStamped s2b = mTfBuffer->lookupTransform( + mDepthFrameId, mBaseFrameId, TIMEZERO_SYS, rclcpp::Duration(1, 0)); + + // Get the TF2 transformation + // tf2::fromMsg(s2b.transform, mSensor2BaseTransf); + geometry_msgs::msg::Transform in = s2b.transform; + mSensor2BaseTransf.setOrigin( + tf2::Vector3(in.translation.x, in.translation.y, in.translation.z)); + // w at the end in the constructor + mSensor2BaseTransf.setRotation( + tf2::Quaternion( + in.rotation.x, in.rotation.y, in.rotation.z, in.rotation.w)); + + double roll, pitch, yaw; + tf2::Matrix3x3(mSensor2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), + " Static transform ref. CMOS Sensor to Base [%s -> %s]", + mDepthFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + mSensor2BaseTransf.getOrigin().x(), + mSensor2BaseTransf.getOrigin().y(), + mSensor2BaseTransf.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", roll * RAD2DEG, + pitch * RAD2DEG, yaw * RAD2DEG); + } catch (tf2::TransformException & ex) { + if (!mSensor2BaseTransfFirstErr) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Transform error: %s", ex.what()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "The tf from '%s' to '%s' is not available.", + mDepthFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Note: one of the possible cause of the problem is the absense of an " + "instance " + "of the `robot_state_publisher` node publishing the correct static " + "TF transformations " + "or a modified URDF not correctly reproducing the ZED " + "TF chain '%s' -> '%s' -> '%s'", + mBaseFrameId.c_str(), mCenterFrameId.c_str(), mDepthFrameId.c_str()); + mSensor2BaseTransfFirstErr = false; + } + + mSensor2BaseTransf.setIdentity(); + return false; + } 
+ + // <---- Static transforms + + mSensor2BaseTransfValid = true; + return true; +} + +bool ZedCamera::getGnss2BaseTransform() +{ + DEBUG_GNSS( + "Getting static TF from '%s' to '%s'", mGnssFrameId.c_str(), + mBaseFrameId.c_str()); + + mGnss2BaseTransfValid = false; + + // ----> Static transforms + // Sensor to Base link + try { + // Save the transformation + geometry_msgs::msg::TransformStamped g2b = mTfBuffer->lookupTransform( + mGnssFrameId, mBaseFrameId, TIMEZERO_SYS, rclcpp::Duration(1, 0)); + + // Get the TF2 transformation + geometry_msgs::msg::Transform in = g2b.transform; + mGnss2BaseTransf.setOrigin( + tf2::Vector3(in.translation.x, in.translation.y, in.translation.z)); + // w at the end in the constructor + mGnss2BaseTransf.setRotation( + tf2::Quaternion( + in.rotation.x, in.rotation.y, + in.rotation.z, in.rotation.w)); + + double roll, pitch, yaw; + tf2::Matrix3x3(mGnss2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), + " Static transform GNSS Antenna to Camera Base [%s -> %s]", + mGnssFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + mGnss2BaseTransf.getOrigin().x(), + mGnss2BaseTransf.getOrigin().y(), + mGnss2BaseTransf.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", roll * RAD2DEG, + pitch * RAD2DEG, yaw * RAD2DEG); + } catch (tf2::TransformException & ex) { + if (!mGnss2BaseTransfFirstErr) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + DEBUG_STREAM_THROTTLE_GNSS(1000.0, "Transform error: " << ex.what()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "The tf from '%s' to '%s' is not available.", + mGnssFrameId.c_str(), mBaseFrameId.c_str()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Note: one of the possible cause of the problem is the absense of an " + "instance " + "of the `robot_state_publisher` node publishing the correct static " + "TF transformations " + "or a modified 
URDF not correctly reproducing the " + "TF chain '%s' -> '%s'", + mBaseFrameId.c_str(), mGnssFrameId.c_str()); + mGnss2BaseTransfFirstErr = false; + } + + mGnss2BaseTransf.setIdentity(); + return false; + } + // <---- Static transforms + + mGnss2BaseTransfValid = true; + return true; +} + +bool ZedCamera::setPose( + float xt, float yt, float zt, float rr, float pr, + float yr) +{ + initTransforms(); + + if (!mSensor2BaseTransfValid) { + getSens2BaseTransform(); + } + + if (!mSensor2CameraTransfValid) { + getSens2CameraTransform(); + } + + if (!mCamera2BaseTransfValid) { + getCamera2BaseTransform(); + } + + // Apply Base to sensor transform + tf2::Transform initPose; + tf2::Vector3 origin(xt, yt, zt); + initPose.setOrigin(origin); + tf2::Quaternion quat; + quat.setRPY(rr, pr, yr); + initPose.setRotation(quat); + + initPose = initPose * mSensor2BaseTransf.inverse(); + + // SL pose + sl::float3 t_vec; + t_vec[0] = initPose.getOrigin().x(); + t_vec[1] = initPose.getOrigin().y(); + t_vec[2] = initPose.getOrigin().z(); + + sl::float4 q_vec; + q_vec[0] = initPose.getRotation().x(); + q_vec[1] = initPose.getRotation().y(); + q_vec[2] = initPose.getRotation().z(); + q_vec[3] = initPose.getRotation().w(); + + sl::Translation trasl(t_vec); + sl::Orientation orient(q_vec); + mInitialPoseSl.setTranslation(trasl); + mInitialPoseSl.setOrientation(orient); + + return mSensor2BaseTransfValid & mSensor2CameraTransfValid & + mCamera2BaseTransfValid; +} + +void ZedCamera::publishImuFrameAndTopic() +{ + if (!mPublishSensImuTransf && !mPublishImuTF) { + return; + } + + if (!mUsingIPC && mStaticImuTfPublished) { + DEBUG_ONCE_TF("Static Imu TF and Transient Local message already published"); + return; + } + + sl::Orientation sl_rot = mSlCamImuTransf.getOrientation(); + sl::Translation sl_tr = mSlCamImuTransf.getTranslation(); + + auto cameraImuTransfMgs = std::make_unique(); + + cameraImuTransfMgs->header.stamp = get_clock()->now(); + + cameraImuTransfMgs->header.frame_id = 
mLeftCamFrameId; + cameraImuTransfMgs->child_frame_id = mImuFrameId; + + cameraImuTransfMgs->transform.rotation.x = sl_rot.ox; + cameraImuTransfMgs->transform.rotation.y = sl_rot.oy; + cameraImuTransfMgs->transform.rotation.z = sl_rot.oz; + cameraImuTransfMgs->transform.rotation.w = sl_rot.ow; + + cameraImuTransfMgs->transform.translation.x = sl_tr.x; + cameraImuTransfMgs->transform.translation.y = sl_tr.y; + cameraImuTransfMgs->transform.translation.z = sl_tr.z; + + try { + if (mPubCamImuTransf) { + mPubCamImuTransf->publish(std::move(cameraImuTransfMgs)); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + + // Publish IMU TF as static TF + if (!mPublishImuTF) { + return; + } + + // ----> Publish TF + // RCLCPP_INFO(get_logger(), "Broadcasting Camera-IMU TF "); + + geometry_msgs::msg::TransformStamped transformStamped; + + transformStamped.header.stamp = + get_clock()->now() + rclcpp::Duration(0, mTfOffset * 1e9); + + transformStamped.header.frame_id = mLeftCamFrameId; + transformStamped.child_frame_id = mImuFrameId; + + transformStamped.transform.rotation.x = sl_rot.ox; + transformStamped.transform.rotation.y = sl_rot.oy; + transformStamped.transform.rotation.z = sl_rot.oz; + transformStamped.transform.rotation.w = sl_rot.ow; + + transformStamped.transform.translation.x = sl_tr.x; + transformStamped.transform.translation.y = sl_tr.y; + transformStamped.transform.translation.z = sl_tr.z; + + if (mUsingIPC) { + mTfBroadcaster->sendTransform(transformStamped); + DEBUG_STREAM_TF( + "Broadcasted new IMU dynamic transform: " + << transformStamped.header.frame_id << " -> " << transformStamped.child_frame_id); + } else { + mStaticTfBroadcaster->sendTransform(transformStamped); + DEBUG_STREAM_TF( + "Broadcasted new IMU static transform: " + << transformStamped.header.frame_id << " -> " << transformStamped.child_frame_id); + } + // 
<---- Publish TF + + // IMU TF publishing diagnostic + double elapsed_sec = mImuTfFreqTimer.toc(); + mPubImuTF_sec->addValue(elapsed_sec); + mImuTfFreqTimer.tic(); + + // Debug info + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, + transformStamped.transform.rotation.z, + transformStamped.transform.rotation.w)) + .getRPY(roll, pitch, yaw); + DEBUG_STREAM_TF( + " - Broadcasted IMU static transform: " + "TF [" + << transformStamped.header.frame_id << " -> " + << transformStamped.child_frame_id << "] Position: (" + << transformStamped.transform.translation.x << ", " + << transformStamped.transform.translation.y << ", " + << transformStamped.transform.translation.z + << ") - Orientation RPY: (" << roll * RAD2DEG << ", " + << pitch * RAD2DEG << ", " << yaw * RAD2DEG << ")"); + } + + // At the end + mStaticImuTfPublished = true; +} + +void ZedCamera::threadFunc_zedGrab() +{ + DEBUG_STREAM_COMM("Grab thread started"); + + // Set the name of the zedGrab thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_zedGrab")).c_str()); + + // ----> Advanced thread settings + if (mChangeThreadSched) { + DEBUG_STREAM_ADV("Grab thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default GRAB thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + if (mThreadSchedPolicy == "SCHED_OTHER") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_OTHER, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! 
- " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_BATCH") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_BATCH, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_FIFO") { + sched_param par; + par.sched_priority = mThreadPrioGrab; + if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_RR") { + sched_param par; + par.sched_priority = mThreadPrioGrab; + if (pthread_setschedparam(pthread_self(), SCHED_RR, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - Policy not supported"); + } + + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! 
- " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New GRAB thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + } + // <---- Advanced thread settings + + mFrameCount = 0; + + // ----> Grab Runtime parameters + mRunParams.enable_depth = false; + mRunParams.measure3D_reference_frame = sl::REFERENCE_FRAME::CAMERA; + mRunParams.remove_saturated_areas = mRemoveSatAreas; + // <---- Grab Runtime parameters + + // Infinite grab thread + while (1) { + try { + RCLCPP_INFO_STREAM_ONCE(get_logger(), "=== " << mCameraName << " started ==="); + + // ----> Interruption check + DEBUG_STREAM_GRAB("Grab thread: checking for interruption"); + if (!rclcpp::ok()) { + mThreadStop = true; + DEBUG_STREAM_COMM("Ctrl+C received: stopping grab thread"); + break; + } + + if (mThreadStop) { + DEBUG_STREAM_COMM("Grab thread stopped"); + break; + } + // <---- Interruption check + + if (mSvoMode && mSvoPause) { + if (!mGrabOnce) { + rclcpp::sleep_for(100ms); +#ifdef USE_SVO_REALTIME_PAUSE + // Lock on Positional Tracking mutex to avoid race conditions + std::lock_guard lock(mPtMutex); + + // Dummy grab + mZed->grab(); + #endif + continue; + } else { + mGrabOnce = false; // Reset the flag and grab once + } + } + + if (mUseSimTime && !mClockAvailable) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 5000.0, + "Waiting for a valid simulation time on the '/clock' topic..."); + rclcpp::sleep_for(1ms); + continue; + } + + sl_tools::StopWatch grabElabTimer(get_clock()); + + // ----> Apply depth settings + DEBUG_STREAM_GRAB("Grab thread: applying depth settings"); + applyDepthSettings(); + // <---- Apply depth settings + + // ----> Apply video dynamic parameters + if (!mSimMode && !mSvoMode) { + DEBUG_STREAM_GRAB("Grab thread: applying video settings"); + applyVideoSettings(); + } + // <---- Apply video dynamic parameters + + 
// ----> Check for Positional Tracking requirement + DEBUG_STREAM_GRAB( + "Grab thread: checking Positional Tracking requirement"); + if (isPosTrackingRequired() && !mPosTrackingStarted) { + static int pt_err_count = 0; + if (!startPosTracking()) { + if (++pt_err_count >= 3) { + RCLCPP_FATAL( + get_logger(), + "It's not possible to enable the required Positional " + "Tracking module."); + exit(EXIT_FAILURE); + } + } else { + pt_err_count = 0; + } + } + + if (mGnssFusionEnabled && !mGnssFixValid) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 5000.0, + " * Waiting for the first valid GNSS fix..."); + } + // ----> Check for Positional Tracking requirement + + if (!mDepthDisabled) { + // ----> Check for Spatial Mapping requirement + + DEBUG_STREAM_GRAB("Grab thread: checking Spatial Mapping requirement"); + { + std::lock_guard lock(mMappingMutex); + if (mMappingEnabled && !mSpatialMappingRunning) { + start3dMapping(); + } + if (!mMappingEnabled && mSpatialMappingRunning) { + stop3dMapping(); + } + } + + // <---- Check for Spatial Mapping requirement + + // ----> Check for Object Detection requirement + DEBUG_STREAM_GRAB("Grab thread: checking Object Detection requirement"); + { + std::lock_guard lock(mObjDetMutex); + if (mObjDetEnabled && !mObjDetRunning) { + startObjDetect(); + if (!sl_tools::isObjDetAvailable(mCamRealModel)) { + mObjDetEnabled = false; + } + } + } + // ----> Check for Object Detection requirement + + // ----> Check for Body Tracking requirement + DEBUG_STREAM_GRAB("Grab thread: checking Body Tracking requirement"); + { + std::lock_guard lock(mBodyTrkMutex); + if (mBodyTrkEnabled && !mBodyTrkRunning) { + startBodyTracking(); + if (!sl_tools::isObjDetAvailable(mCamRealModel)) { + mBodyTrkEnabled = false; + } + } + } + // ----> Check for Object Detection requirement + } + + // ----> Grab freq calculation + double elapsed_sec = mGrabFreqTimer.toc(); + mGrabPeriodMean_sec->addValue(elapsed_sec); + 
mGrabFreqTimer.tic(); + + // RCLCPP_INFO_STREAM(get_logger(), "Grab period: " + // << mGrabPeriodMean_sec->getAvg() / 1e6 + // << " Freq: " << 1e6 / mGrabPeriodMean_usec->getAvg()); + // <---- Grab freq calculation + + // ----> Publish SVO Status information + if (mSvoMode) { + publishSvoStatus(mFrameTimestamp.nanoseconds()); + } + // <---- Publish SVO Status information + + // Lock on Positional Tracking mutex to avoid race conditions + DEBUG_STREAM_GRAB("Grab thread: locking PT mutex for grab"); + std::lock_guard lock(mPtMutex); + + // Start processing timer for diagnostic + grabElabTimer.tic(); + + // ZED grab + DEBUG_STREAM_GRAB("Grab thread: grabbing frame #" << mFrameCount); + + // ----> Params Debug info + if (_debugGrab) { + sl::String json; + mRunParams.encode(json); + DEBUG_STREAM_GRAB( + "Grab thread: Grab parameters: " << std::string(json)); + + mInitParams.encode(json); + DEBUG_STREAM_GRAB( + "Grab thread: Init parameters: " << std::string(json)); + } + // <---- Params Debug info + + if (isDepthRequired() || isPosTrackingRequired()) { + DEBUG_STREAM_GRAB("Grab thread: grabbing..."); + mGrabStatus = mZed->grab(mRunParams); // Process the full pipeline with depth + + } else { + DEBUG_GRAB("Grab thread: reading..."); + mGrabStatus = mZed->read(); // Image and sensor data reading with no depth processing + } + + DEBUG_GRAB("Grab thread: frame grabbed"); + + // ----> Grab errors? 
+ // Note: disconnection are automatically handled by the ZED SDK + if (mGrabStatus != sl::ERROR_CODE::SUCCESS) { + if (mSvoMode && mGrabStatus == sl::ERROR_CODE::END_OF_SVOFILE_REACHED) { + // ----> Check SVO status + if (mSvoLoop) { + mSvoLoopCount++; + mZed->setSVOPosition(mSvoFrameStart); + RCLCPP_WARN_STREAM( + get_logger(), + "SVO reached the end and it has been restarted from frame #" << mSvoFrameStart); + rclcpp::sleep_for( + std::chrono::microseconds( + static_cast(mGrabPeriodMean_sec->getAvg() * 1e6))); + if (mResetPoseWithSvoLoop) { + RCLCPP_WARN( + get_logger(), + " * Camera pose reset to initial conditions."); + + mResetOdomFromSrv = true; + mOdomPath.clear(); + mPosePath.clear(); + + // Restart tracking + startPosTracking(); + } + continue; + } else { + // ----> Stop all the other threads and Timers + mThreadStop = true; + if (mPathTimer) {mPathTimer->cancel();} + if (mFusedPcTimer) {mFusedPcTimer->cancel();} + if (mTempPubTimer) {mTempPubTimer->cancel();} + if (mGnssPubCheckTimer) {mGnssPubCheckTimer->cancel();} + // <---- Stop all the other threads and Timers + + RCLCPP_WARN(get_logger(), "SVO reached the end."); + + // Force SVO status update + if (!publishSvoStatus(mFrameTimestamp.nanoseconds())) { + RCLCPP_WARN(get_logger(), "Node stopped. Press Ctrl+C to exit."); + break; + } else { + if (mPubSvoStatus) { + RCLCPP_WARN_STREAM( + get_logger(), + "Waiting for SVO status subscribers to unsubscribe. 
Active subscribers: " << + mPubSvoStatus->get_subscription_count()); + } + mDiagUpdater.force_update(); + rclcpp::sleep_for(1s); + continue; + } + } + // <---- Check SVO status + } else if (mGrabStatus == sl::ERROR_CODE::CAMERA_REBOOTING) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Connection issue detected: " + << sl::toString(mGrabStatus).c_str()); + rclcpp::sleep_for(1s); + continue; + } else if (mGrabStatus == sl::ERROR_CODE::CAMERA_NOT_INITIALIZED || + mGrabStatus == sl::ERROR_CODE::FAILURE) + { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera issue detected: " + << sl::toString(mGrabStatus).c_str() << ". Trying to recover the connection..."); + rclcpp::sleep_for(1s); + continue; + } else if (mGrabStatus == sl::ERROR_CODE::CORRUPTED_FRAME) { + RCLCPP_WARN_STREAM( + get_logger(), + "Grab status degraded: " + << sl::toString(mGrabStatus).c_str()); + static const int frame_grab_period = + static_cast(std::round(1000. / mCamGrabFrameRate)); + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "Critical camera error: " << sl::toString(mGrabStatus).c_str() + << ". NODE KILLED."); + mZed.reset(); + exit(EXIT_FAILURE); + } + } + // <---- Grab errors? + + mFrameCount++; + if (mSvoMode) { + mSvoFrameId = mZed->getSVOPosition(); + mSvoFrameCount = mZed->getSVONumberOfFrames(); + + // ----> Publish Clock if required + if (mUseSvoTimestamp && mPublishSvoClock) { + publishClock(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + // <---- Publish Clock if required + } + + if (mGnssFusionEnabled) { + // Process Fusion data + mFusionStatus = mFusion.process(); + // ----> Fusion errors? + if (mFusionStatus != sl::FUSION_ERROR_CODE::SUCCESS && + mFusionStatus != sl::FUSION_ERROR_CODE::NO_NEW_DATA_AVAILABLE) + { + RCLCPP_ERROR_STREAM( + get_logger(), + "Fusion error: " << sl::toString(mFusionStatus).c_str()); + } + // <---- Fusion errors? 
+ } + + // ----> Timestamp + if (mSvoMode) { + if (mUseSvoTimestamp) { + mFrameTimestamp = sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } else { + mFrameTimestamp = + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::CURRENT)); + } + } else if (mSimMode) { + if (mUseSimTime) { + mFrameTimestamp = get_clock()->now(); + } else { + mFrameTimestamp = sl_tools::slTime2Ros( + mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + } else { + mFrameTimestamp = + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + //DEBUG_STREAM_COMM("Grab timestamp: " << mFrameTimestamp.nanoseconds() << " nsec"); + // <---- Timestamp + + if (mStreamingServerRequired && !mStreamingServerRunning) { + DEBUG_STR("Streaming server required, but not running"); + startStreamingServer(); + } + + if (!mSimMode) { + if (mGnssFusionEnabled && mGnssFixNew) { + mGnssFixNew = false; + + rclcpp::Time real_frame_ts = sl_tools::slTime2Ros( + mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + DEBUG_STREAM_GNSS( + "GNSS synced frame ts: " + << real_frame_ts.nanoseconds() << " nsec"); + float dT_sec = (static_cast(real_frame_ts.nanoseconds()) - + static_cast(mGnssTimestamp.nanoseconds())) / + 1e9; + DEBUG_STREAM_GNSS( + "DeltaT: " + << dT_sec << " sec [" << std::fixed << std::setprecision(9) + << static_cast(real_frame_ts.nanoseconds()) / 1e9 << "-" + << static_cast(mGnssTimestamp.nanoseconds()) / 1e9 << "]"); + + if (dT_sec < 0.0) { + RCLCPP_WARN_STREAM( + get_logger(), + "GNSS sensor and ZED Timestamps are not good. 
dT = " << dT_sec + << " sec"); + } + } + } + + DEBUG_STREAM_GRAB("Grab thread: publishing health status"); + publishHealthStatus(); + + // ----> Check recording status + DEBUG_STREAM_GRAB("Grab thread: checking recording status"); + mRecMutex.lock(); + if (mRecording) { + mRecStatus = mZed->getRecordingStatus(); + static int svo_rec_err_count = 0; + if (mRecStatus.is_recording && !mRecStatus.status) { + if (++svo_rec_err_count > 3) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Error saving frame to SVO"); + } + } else { + svo_rec_err_count = 0; + } + } + mRecMutex.unlock(); + // <---- Check recording status + + // ----> Retrieve Image/Depth data if someone has subscribed to + DEBUG_STREAM_GRAB("Grab thread: retrieving Image/Depth data"); + processVideoDepth(); + // <---- Retrieve Image/Depth data if someone has subscribed to + + if (!mDepthDisabled) { + // ----> Retrieve the point cloud if someone has subscribed to + DEBUG_STREAM_GRAB("Grab thread: retrieving Point Cloud data"); + processPointCloud(); + // <---- Retrieve the point cloud if someone has subscribed to + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use `GEN_3` even if depth is disabled + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (!mDepthDisabled) { +#endif + // ----> Localization processing + DEBUG_STREAM_GRAB("Grab thread: Localization processing"); + if (mPosTrackingStarted) { + if (!mSvoPause) { + DEBUG_PT( + "==============================================================" + "=="); + DEBUG_PT("=== Positional Tracking status ==="); + // Update Positional Tracking status + mPosTrackingStatus = mZed->getPositionalTrackingStatus(); + + DEBUG_STREAM_PT( + " * Odometry: " + << sl::toString(mPosTrackingStatus.odometry_status).c_str() ); + DEBUG_STREAM_PT( + " * Spatial Memory: " + << 
sl::toString(mPosTrackingStatus.spatial_memory_status).c_str()); + DEBUG_STREAM_PT( + " * Tracking fusion: " + << sl::toString(mPosTrackingStatus.tracking_fusion_status).c_str()); + + DEBUG_PT("=== processOdometry ==="); + processOdometry(); + DEBUG_PT("=== processPose ==="); + processPose(); + if (mGnssFusionEnabled) { + if (mSvoMode) { + DEBUG_PT("=== processSvoGnssData ==="); + processSvoGnssData(); + } + DEBUG_PT("=== processGeoPose ==="); + processGeoPose(); + } + } + + // Publish `odom` and `map` TFs at the grab frequency + // RCLCPP_INFO(get_logger(), "Publishing TF -> threadFunc_zedGrab"); + DEBUG_TF("=== publishTFs ==="); + publishTFs(mFrameTimestamp); + } + // <---- Localization processing + } + + if (!mDepthDisabled) { + DEBUG_STREAM_GRAB("Grab thread: Object Detection processing"); + { + std::lock_guard lock(mObjDetMutex); + if (mObjDetRunning) { + processDetectedObjects(mFrameTimestamp); + } + } + + DEBUG_STREAM_GRAB("Grab thread: Body Tracking processing"); + { + std::lock_guard lock(mBodyTrkMutex); + if (mBodyTrkRunning) { + processBodies(mFrameTimestamp); + } + } + + DEBUG_STREAM_GRAB("Grab thread: Region of interest processing"); + // ----> Region of interest + processRtRoi(mFrameTimestamp); + // <---- Region of interest + } + + // Diagnostic statistics update + double mean_elab_sec = mElabPeriodMean_sec->addValue(grabElabTimer.toc()); + } catch (const std::exception & e) { + rcutils_reset_error(); + RCLCPP_ERROR_STREAM( + get_logger(), + "threadFunc_zedGrab: Exception: " << e.what()); + continue; + } catch (...) 
{ + rcutils_reset_error(); + RCLCPP_ERROR( + get_logger(), + "threadFunc_zedGrab: Unknown exception."); + continue; + } + + if (mSvoMode && !mSvoRealtime) { + double effective_grab_period = mElabPeriodMean_sec->getAvg(); + mSvoExpectedPeriod = 1.0 / (mSvoRate * static_cast(mCamGrabFrameRate)); + double sleep = std::max(0.001, mSvoExpectedPeriod - effective_grab_period); + rclcpp::sleep_for(std::chrono::milliseconds(static_cast(sleep * 1000))); + + + DEBUG_STREAM_COMM( + "SVO sleep time: " << sleep << " sec - Expecter grab period:" + << mSvoExpectedPeriod << " sec - Elab time:" + << effective_grab_period << " sec"); + } + + DEBUG_STREAM_GRAB("Grab thread: iteration completed"); + } + + // Stop the heartbeat + mHeartbeatTimer->cancel(); + + DEBUG_STREAM_COMM("Grab thread finished"); +} + +bool ZedCamera::publishSensorsData(rclcpp::Time force_ts) +{ + if (mGrabStatus != sl::ERROR_CODE::SUCCESS && mGrabStatus != sl::ERROR_CODE::CORRUPTED_FRAME) { + DEBUG_SENS("Camera not ready. Sensor data not published"); + rclcpp::sleep_for(1s); + return false; + } + + // ----> Subscribers count + DEBUG_STREAM_SENS("Sensors callback: counting subscribers"); + + size_t imu_SubCount = 0; + size_t imu_RawSubCount = 0; + size_t imu_TempSubCount = 0; + size_t imu_MagSubCount = 0; + size_t pressSubCount = 0; + + try { + if (mPubImu) {imu_SubCount = count_subscribers(mPubImu->get_topic_name());} + if (mPubImuRaw) {imu_RawSubCount = count_subscribers(mPubImuRaw->get_topic_name());} + imu_MagSubCount = 0; + pressSubCount = 0; + + if (sl_tools::isZED2OrZED2i(mCamRealModel)) { + if (mPubImuMag) {imu_MagSubCount = count_subscribers(mPubImuMag->get_topic_name());} + if (mPubPressure) {pressSubCount = count_subscribers(mPubPressure->get_topic_name());} + } + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_SENS("pubSensorsData: Exception while counting subscribers"); + return false; + } + // <---- Subscribers count + + // ----> Grab data and setup timestamps + DEBUG_STREAM_ONCE_SENS("Sensors callback: Grab data and setup timestamps"); + rclcpp::Time ts_imu; + rclcpp::Time ts_baro; + rclcpp::Time ts_mag; + + rclcpp::Time now = get_clock()->now(); + + sl::SensorsData sens_data; + sl::ERROR_CODE err; + + if (mSensCameraSync) { + err = mZed->getSensorsData(sens_data, sl::TIME_REFERENCE::IMAGE); + } else { + err = mZed->getSensorsData(sens_data, sl::TIME_REFERENCE::CURRENT); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + // Only warn if not in SVO mode or if the error is not a benign sensor + // unavailability + if (!mSvoMode || err != sl::ERROR_CODE::SENSORS_NOT_AVAILABLE) { + RCLCPP_WARN_STREAM( + get_logger(), + "[publishSensorsData] sl::getSensorsData error: " + << sl::toString(err).c_str()); + } + return false; + } + + if (mSensCameraSync) { + ts_imu = force_ts; + ts_baro = force_ts; + ts_mag = force_ts; + } else if (mSvoMode && !mUseSvoTimestamp) { + ts_imu = now; + ts_baro = now; + ts_mag = now; + } else if (mSimMode) { + if (mUseSimTime) { + ts_imu = now; + } else { + ts_imu = sl_tools::slTime2Ros(sens_data.imu.timestamp); + } + ts_baro = ts_imu; + ts_mag = ts_imu; + } else { + ts_imu = sl_tools::slTime2Ros(sens_data.imu.timestamp); + ts_baro = sl_tools::slTime2Ros(sens_data.barometer.timestamp); + ts_mag = sl_tools::slTime2Ros(sens_data.magnetometer.timestamp); + } + // <---- Grab data and setup timestamps + + // ----> Check for duplicated data + bool new_imu_data = ts_imu != mLastTs_imu; + double dT = ts_imu.seconds() - mLastTs_imu.seconds(); + + bool new_baro_data = ts_baro != mLastTs_baro; + mLastTs_baro = ts_baro; + bool new_mag_data = ts_mag != mLastTs_mag; + mLastTs_mag = ts_mag; + + // ----> Respect data frequency for SVO2 + if (mSvoMode) { + const double imu_period = 1.0 / mSensPubRate; + + if (dT < imu_period) { 
+ DEBUG_SENS("SENSOR: IMU data not ready yet"); + return false; + } + } + DEBUG_STREAM_SENS( + "IMU TS: " << ts_imu.seconds() << " - Grab TS: " << mFrameTimestamp.seconds() << " - Diff: " << + mFrameTimestamp.seconds() - ts_imu.seconds()); + // <---- Respect data frequency for SVO2 + + if (!new_imu_data && !new_baro_data && !new_mag_data) { + DEBUG_STREAM_SENS("No new sensors data"); + return false; + } + + if (mSimMode) { + new_baro_data = false; + new_mag_data = false; + } + // <---- Check for duplicated data + + mLastTs_imu = ts_imu; + + DEBUG_STREAM_SENS("SENSOR LAST PERIOD: " << dT << " sec @" << 1. / dT << " Hz"); + + // ----> Sensors freq for diagnostic + if (new_imu_data) { + double mean = mImuPeriodMean_sec->addValue(mImuFreqTimer.toc()); + mImuFreqTimer.tic(); + + DEBUG_STREAM_SENS("Thread New data MEAN freq: " << 1. / mean); + } + + if (new_baro_data) { + double mean = mBaroPeriodMean_sec->addValue(mBaroFreqTimer.toc()); + mBaroFreqTimer.tic(); + DEBUG_STREAM_SENS("Barometer freq: " << 1. / mean); + } + + if (new_mag_data) { + double mean = mMagPeriodMean_sec->addValue(mMagFreqTimer.toc()); + mMagFreqTimer.tic(); + + DEBUG_STREAM_SENS("Magnetometer freq: " << 1. / mean); + } + // <---- Sensors freq for diagnostic + + // ----> Sensors data publishing + if (new_imu_data) { + publishImuFrameAndTopic(); + + if (imu_SubCount > 0) { + mImuPublishing = true; + + auto imuMsg = std::make_unique(); + + imuMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : ts_imu; + imuMsg->header.frame_id = mImuFrameId; + + imuMsg->orientation.x = sens_data.imu.pose.getOrientation()[0]; + imuMsg->orientation.y = sens_data.imu.pose.getOrientation()[1]; + imuMsg->orientation.z = sens_data.imu.pose.getOrientation()[2]; + imuMsg->orientation.w = sens_data.imu.pose.getOrientation()[3]; + + imuMsg->angular_velocity.x = sens_data.imu.angular_velocity[0] * DEG2RAD; + imuMsg->angular_velocity.y = sens_data.imu.angular_velocity[1] * DEG2RAD; + imuMsg->angular_velocity.z = sens_data.imu.angular_velocity[2] * DEG2RAD; + + imuMsg->linear_acceleration.x = sens_data.imu.linear_acceleration[0]; + imuMsg->linear_acceleration.y = sens_data.imu.linear_acceleration[1]; + imuMsg->linear_acceleration.z = sens_data.imu.linear_acceleration[2]; + + // ----> Covariances copy + // Note: memcpy not allowed because ROS 2 uses double and ZED SDK uses + // float + for (int i = 0; i < 3; ++i) { + int r = 0; + + if (i == 0) { + r = 0; + } else if (i == 1) { + r = 1; + } else { + r = 2; + } + + imuMsg->orientation_covariance[i * 3 + 0] = + sens_data.imu.pose_covariance.r[r * 3 + 0] * DEG2RAD * DEG2RAD; + imuMsg->orientation_covariance[i * 3 + 1] = + sens_data.imu.pose_covariance.r[r * 3 + 1] * DEG2RAD * DEG2RAD; + imuMsg->orientation_covariance[i * 3 + 2] = + sens_data.imu.pose_covariance.r[r * 3 + 2] * DEG2RAD * DEG2RAD; + + imuMsg->linear_acceleration_covariance[i * 3 + 0] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 0]; + imuMsg->linear_acceleration_covariance[i * 3 + 1] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 1]; + imuMsg->linear_acceleration_covariance[i * 3 + 2] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 2]; + + imuMsg->angular_velocity_covariance[i * 3 + 0] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 0] * DEG2RAD * + DEG2RAD; + imuMsg->angular_velocity_covariance[i * 3 + 1] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 1] * DEG2RAD * + DEG2RAD; + 
imuMsg->angular_velocity_covariance[i * 3 + 2] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 2] * DEG2RAD * + DEG2RAD; + } + // <---- Covariances copy + + DEBUG_STREAM_SENS("Publishing IMU message"); + try { + if (mPubImu) {mPubImu->publish(std::move(imuMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } else { + mImuPublishing = false; + } + + if (imu_RawSubCount > 0) { + mImuPublishing = true; + + auto imuRawMsg = std::make_unique(); + + imuRawMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : ts_imu; + imuRawMsg->header.frame_id = mImuFrameId; + + imuRawMsg->angular_velocity.x = + sens_data.imu.angular_velocity_uncalibrated[0] * DEG2RAD; + imuRawMsg->angular_velocity.y = + sens_data.imu.angular_velocity_uncalibrated[1] * DEG2RAD; + imuRawMsg->angular_velocity.z = + sens_data.imu.angular_velocity_uncalibrated[2] * DEG2RAD; + + imuRawMsg->linear_acceleration.x = sens_data.imu.linear_acceleration_uncalibrated[0]; + imuRawMsg->linear_acceleration.y = sens_data.imu.linear_acceleration_uncalibrated[1]; + imuRawMsg->linear_acceleration.z = sens_data.imu.linear_acceleration_uncalibrated[2]; + + // ----> Covariances copy + // Note: memcpy not allowed because ROS 2 uses double and ZED SDK uses + // float + for (int i = 0; i < 3; ++i) { + int r = 0; + + if (i == 0) { + r = 0; + } else if (i == 1) { + r = 1; + } else { + r = 2; + } + + imuRawMsg->linear_acceleration_covariance[i * 3 + 0] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 0]; + imuRawMsg->linear_acceleration_covariance[i * 3 + 1] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 1]; + imuRawMsg->linear_acceleration_covariance[i * 3 + 2] = + sens_data.imu.linear_acceleration_covariance.r[r * 3 + 2]; + + imuRawMsg->angular_velocity_covariance[i * 3 + 0] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 0] * DEG2RAD * + 
DEG2RAD; + imuRawMsg->angular_velocity_covariance[i * 3 + 1] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 1] * DEG2RAD * + DEG2RAD; + imuRawMsg->angular_velocity_covariance[i * 3 + 2] = + sens_data.imu.angular_velocity_covariance.r[r * 3 + 2] * DEG2RAD * + DEG2RAD; + } + // <---- Covariances copy + + DEBUG_STREAM_SENS("Publishing IMU RAW message"); + try { + if (mPubImuRaw) {mPubImuRaw->publish(std::move(imuRawMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + } + + if (sens_data.barometer.is_available && new_baro_data) { + if (pressSubCount > 0) { + mBaroPublishing = true; + + auto pressMsg = std::make_unique(); + + pressMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : ts_baro; + pressMsg->header.frame_id = mBaroFrameId; + pressMsg->fluid_pressure = + sens_data.barometer.pressure; // Pascals -> see + // https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/FluidPressure.msg + pressMsg->variance = 1.0585e-2; + + DEBUG_STREAM_SENS("Publishing PRESS message"); + try { + if (mPubPressure) {mPubPressure->publish(std::move(pressMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } else { + mBaroPublishing = false; + } + } + + if (sens_data.magnetometer.is_available && new_mag_data) { + if (imu_MagSubCount > 0) { + mMagPublishing = true; + + auto magMsg = std::make_unique(); + + magMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : ts_mag; + magMsg->header.frame_id = mMagFrameId; + magMsg->magnetic_field.x = + sens_data.magnetometer.magnetic_field_calibrated.x * 1e-6; // Tesla + magMsg->magnetic_field.y = + sens_data.magnetometer.magnetic_field_calibrated.y * 1e-6; // Tesla + magMsg->magnetic_field.z = + sens_data.magnetometer.magnetic_field_calibrated.z * 1e-6; // Tesla + magMsg->magnetic_field_covariance[0] = 0.039e-6; + magMsg->magnetic_field_covariance[1] = 0.0f; + magMsg->magnetic_field_covariance[2] = 0.0f; + magMsg->magnetic_field_covariance[3] = 0.0f; + magMsg->magnetic_field_covariance[4] = 0.037e-6; + magMsg->magnetic_field_covariance[5] = 0.0f; + magMsg->magnetic_field_covariance[6] = 0.0f; + magMsg->magnetic_field_covariance[7] = 0.0f; + magMsg->magnetic_field_covariance[8] = 0.047e-6; + + DEBUG_STREAM_SENS("Publishing MAG message"); + try { + if (mPubImuMag) {mPubImuMag->publish(std::move(magMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } else { + mMagPublishing = false; + } + } + // <---- Sensors data publishing + + return true; +} + +void ZedCamera::publishTFs(rclcpp::Time t) +{ + // DEBUG_STREAM_TF("publishTFs"); + + // RCLCPP_INFO_STREAM(get_logger(), "publishTFs - t type:" << + // t.get_clock_type()); + + if (!mPosTrackingReady) { + return; + } + + if (t == TIMEZERO_ROS) { + DEBUG_STREAM_TF("Time zero: not publishing TFs"); + return; + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (!mDepthDisabled) { +#endif + if (mPublishTF) { + publishOdomTF(t); // publish the base Frame in odometry frame + + if (mPublishMapTF) { + publishPoseTF(t); // publish the odometry Frame in map frame + } + } + } + + // Publish the camera TFs at the grab frequency, as they can be used by other nodes even if localization is not working + publishCameraTFs(t); +} + +void ZedCamera::publishCameraTFs(rclcpp::Time t) +{ + // DEBUG_STREAM_TF("publishCameraTFs"); + + if (!mZed) { + DEBUG_STREAM_TF("ZED Camera not initialized"); + return; + } + + if (!mUsingIPC && mStaticTfPublished) { + DEBUG_ONCE_TF("Static Camera TF already broadcasted"); + return; + } + + auto calib_params = mZed->getCameraInformation().camera_configuration.calibration_parameters; + const double baseline = static_cast(calib_params.getCameraBaseline()); + sl::Transform stereo_transform = calib_params.stereo_transform; + + DEBUG_STREAM_TF("Calibrated Camera baseline: " << baseline << " m"); + DEBUG_STREAM_TF( + "SDK Stereo Transform T: [" << stereo_transform.getTranslation().x << ", " + << stereo_transform.getTranslation().y << ", " + << stereo_transform.getTranslation().z << "]"); + DEBUG_STREAM_TF( + "SDK Stereo Transform R: [" + << stereo_transform.getOrientation().x << ", " + << 
stereo_transform.getOrientation().y << ", " + << stereo_transform.getOrientation().z << ", " + << stereo_transform.getOrientation().w << "]"); + + // ----> Validate data + bool not_valid = false; + const double EPSILON = 1e-6; + + if (std::abs(stereo_transform.getTranslation().x) > EPSILON || + std::abs(stereo_transform.getTranslation().z) > EPSILON) + { + RCLCPP_WARN_STREAM( + get_logger(), + "Unexpected calibrated stereo transform translation: [" + << stereo_transform.getTranslation().x << ", " + << stereo_transform.getTranslation().y << ", " + << stereo_transform.getTranslation().z << "]. Expected [0, " << baseline << ", 0]."); + not_valid = true; + } + if (std::abs(stereo_transform.getOrientation().x) > EPSILON || + std::abs(stereo_transform.getOrientation().y) > EPSILON || + std::abs(stereo_transform.getOrientation().z) > EPSILON || + std::abs(stereo_transform.getOrientation().w - 1.0) > EPSILON) + { + RCLCPP_WARN_STREAM( + get_logger(), + "Unexpected calibrated stereo transform rotation: [" + << stereo_transform.getOrientation().x << ", " + << stereo_transform.getOrientation().y << ", " + << stereo_transform.getOrientation().z << ", " + << stereo_transform.getOrientation().w << "]. Expected [0, 0, 0, 1]."); + not_valid = true; + } + // Note: "baseline" is a positive value, while the stereo transform y-translation is expected to be a negative value. 
+ if (std::abs(baseline + stereo_transform.getTranslation().y) > EPSILON) { + RCLCPP_WARN_STREAM( + get_logger(), + "Baseline mismatch: Camera baseline is " << baseline << " m but calibrated stereo transform y-translation is " << + stereo_transform.getTranslation().y << " m."); + not_valid = true; + } + + if (not_valid) { + RCLCPP_WARN_STREAM( + get_logger(), "Please report this problem to Stereolabs support if you see this message, " + "adding information about your camera model and serial number: " + << sl::toString(mCamRealModel) << " - " << mCamSerialNumber); + return; + } + // <---- Validate data + + double optical_offset_x = 0.0; + + switch (mCamRealModel) { + case sl::MODEL::ZED: + optical_offset_x = -0.01; + break; + case sl::MODEL::ZED_M: + optical_offset_x = 0.0; + break; + case sl::MODEL::ZED2: + optical_offset_x = -0.01; + break; + case sl::MODEL::ZED2i: + optical_offset_x = -0.01; + break; + case sl::MODEL::ZED_X: + case sl::MODEL::ZED_XM: + case sl::MODEL::ZED_X_HDR: + case sl::MODEL::ZED_X_HDR_MAX: + case sl::MODEL::ZED_X_HDR_MINI: + optical_offset_x = -0.01; + break; + case sl::MODEL::VIRTUAL_ZED_X: + optical_offset_x = -0.01; + break; + default: + RCLCPP_ERROR_STREAM( + get_logger(), + "Unknown camera model for static TF publishing: " + << sl::toString(mCamRealModel).c_str()); + mZed.reset(); + exit(EXIT_FAILURE); + } + + // ----> Lambda function to publish transform with debug info + auto publishTransform = [&](const geometry_msgs::msg::TransformStamped & tf) { + if (mUsingIPC) { + mTfBroadcaster->sendTransform(tf); + DEBUG_STREAM_TF( + "Broadcasted new dynamic transform: " + << tf.header.frame_id << " -> " << tf.child_frame_id); + } else { + mStaticTfBroadcaster->sendTransform(tf); + DEBUG_STREAM_TF( + "Broadcasted new static transform: " + << tf.header.frame_id << " -> " << tf.child_frame_id); + } + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + tf.transform.rotation.x, tf.transform.rotation.y, + 
tf.transform.rotation.z, tf.transform.rotation.w)).getRPY(roll, pitch, yaw); + DEBUG_STREAM_TF( + tf.header.stamp.sec << "." << tf.header.stamp.nanosec + << " - TF [" + << tf.header.frame_id << " -> " << tf.child_frame_id + << "] Position: (" << tf.transform.translation.x << ", " + << tf.transform.translation.y << ", " + << tf.transform.translation.z << ") - Orientation RPY: (" + << roll * RAD2DEG << ", " << pitch * RAD2DEG << ", " + << yaw * RAD2DEG << ")"); + } + }; + // <---- Lambda function to publish transform with debug info + + // ----> Common info + geometry_msgs::msg::TransformStamped transformStamped; + transformStamped.header.stamp = + mUsePubTimestamps ? get_clock()->now() : + (t + rclcpp::Duration(0, mTfOffset * 1e9)); + // <---- Common info + + // ----> New camera_center -> left_camera_frame + transformStamped.header.frame_id = mCenterFrameId; + transformStamped.child_frame_id = mLeftCamFrameId; + + transformStamped.transform.translation.x = optical_offset_x; + transformStamped.transform.translation.y = baseline / 2.0; + transformStamped.transform.translation.z = 0.0; + transformStamped.transform.rotation.x = 0.0; + transformStamped.transform.rotation.y = 0.0; + transformStamped.transform.rotation.z = 0.0; + transformStamped.transform.rotation.w = 1.0; + + // Publish transformation + publishTransform(transformStamped); + // <---- New camera_center -> left_camera_frame + + // ----> New camera_center -> right_camera_frame + transformStamped.header.frame_id = mCenterFrameId; + transformStamped.child_frame_id = mRightCamFrameId; + + transformStamped.transform.translation.x = optical_offset_x; + transformStamped.transform.translation.y = -baseline / 2.0; + transformStamped.transform.translation.z = 0.0; + transformStamped.transform.rotation.x = 0.0; + transformStamped.transform.rotation.y = 0.0; + transformStamped.transform.rotation.z = 0.0; + transformStamped.transform.rotation.w = 1.0; + + // Publish transformation + publishTransform(transformStamped); + 
// <---- New camera_center -> right_camera_frame + + // at the end + mStaticTfPublished = true; +} + +void ZedCamera::publishOdomTF(rclcpp::Time t) +{ + // DEBUG_STREAM_TF("publishOdomTF"); + + // ----> Avoid duplicated TF publishing + if (t == mLastTs_odom) { + return; + } + mLastTs_odom = t; + // <---- Avoid duplicated TF publishing + + if (!mSensor2BaseTransfValid) { + getSens2BaseTransform(); + } + + if (!mSensor2CameraTransfValid) { + getSens2CameraTransform(); + } + + if (!mCamera2BaseTransfValid) { + getCamera2BaseTransform(); + } + + geometry_msgs::msg::TransformStamped transformStamped; + + transformStamped.header.stamp = + mUsePubTimestamps ? get_clock()->now() : + (t + rclcpp::Duration(0, mTfOffset * 1e9)); + + // RCLCPP_INFO_STREAM(get_logger(), "Odom TS: " << + // transformStamped.header.stamp); + + transformStamped.header.frame_id = mOdomFrameId; + transformStamped.child_frame_id = mBaseFrameId; + // conversion from Tranform to message + tf2::Vector3 translation = mOdom2BaseTransf.getOrigin(); + tf2::Quaternion quat = mOdom2BaseTransf.getRotation(); + transformStamped.transform.translation.x = translation.x(); + transformStamped.transform.translation.y = translation.y(); + transformStamped.transform.translation.z = translation.z(); + transformStamped.transform.rotation.x = quat.x(); + transformStamped.transform.rotation.y = quat.y(); + transformStamped.transform.rotation.z = quat.z(); + transformStamped.transform.rotation.w = quat.w(); + + // Publish transformation + mTfBroadcaster->sendTransform(transformStamped); + + // Odom TF publishing diagnostic + double elapsed_sec = mOdomFreqTimer.toc(); + mPubOdomTF_sec->addValue(elapsed_sec); + mOdomFreqTimer.tic(); + + // Debug info + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, + transformStamped.transform.rotation.z, + transformStamped.transform.rotation.w)) + .getRPY(roll, pitch, yaw); + 
DEBUG_STREAM_TF( + transformStamped.header.stamp.sec << "." << transformStamped.header.stamp.nanosec + << " - TF [" << transformStamped.header.frame_id << " -> " + << transformStamped.child_frame_id << "] Position: (" + << transformStamped.transform.translation.x << ", " + << transformStamped.transform.translation.y << ", " + << transformStamped.transform.translation.z + << ") - Orientation RPY: (" << roll * RAD2DEG << ", " + << pitch * RAD2DEG << ", " << yaw * RAD2DEG << ")"); + } +} + +void ZedCamera::publishPoseTF(rclcpp::Time t) +{ + // DEBUG_STREAM_TF("publishPoseTF"); + + // ----> Avoid duplicated TF publishing + if (t == mLastTs_pose) { + return; + } + mLastTs_pose = t; + // <---- Avoid duplicated TF publishing + + if (!mSensor2BaseTransfValid) { + getSens2BaseTransform(); + } + + if (!mSensor2CameraTransfValid) { + getSens2CameraTransform(); + } + + if (!mCamera2BaseTransfValid) { + getCamera2BaseTransform(); + } + + geometry_msgs::msg::TransformStamped transformStamped; + + transformStamped.header.stamp = + mUsePubTimestamps ? 
get_clock()->now() : + (t + rclcpp::Duration(0, mTfOffset * 1e9)); + transformStamped.header.frame_id = mMapFrameId; + transformStamped.child_frame_id = mOdomFrameId; + // conversion from Tranform to message + tf2::Vector3 translation = mMap2OdomTransf.getOrigin(); + tf2::Quaternion quat = mMap2OdomTransf.getRotation(); + transformStamped.transform.translation.x = translation.x(); + transformStamped.transform.translation.y = translation.y(); + transformStamped.transform.translation.z = translation.z(); + transformStamped.transform.rotation.x = quat.x(); + transformStamped.transform.rotation.y = quat.y(); + transformStamped.transform.rotation.z = quat.z(); + transformStamped.transform.rotation.w = quat.w(); + + // Publish transformation + mTfBroadcaster->sendTransform(transformStamped); + + // Pose TF publishing diagnostic + double elapsed_sec = mPoseFreqTimer.toc(); + mPubPoseTF_sec->addValue(elapsed_sec); + mPoseFreqTimer.tic(); + + // Debug info + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, + transformStamped.transform.rotation.z, + transformStamped.transform.rotation.w)) + .getRPY(roll, pitch, yaw); + DEBUG_STREAM_TF( + transformStamped.header.stamp.sec << "." 
<< transformStamped.header.stamp.nanosec + << " - TF [" << transformStamped.header.frame_id << " -> " + << transformStamped.child_frame_id << "] Position: (" + << transformStamped.transform.translation.x << ", " + << transformStamped.transform.translation.y << ", " + << transformStamped.transform.translation.z + << ") - Orientation RPY: (" << roll * RAD2DEG << ", " + << pitch * RAD2DEG << ", " << yaw * RAD2DEG << ")"); + } +} + +void ZedCamera::threadFunc_pubSensorsData() +{ + DEBUG_STREAM_SENS("Sensors thread started"); + + // Set the name of the pubSensorsData thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_pubSensorsData")).c_str()); + + // ----> Advanced thread settings + if (mChangeThreadSched) { + DEBUG_STREAM_ADV("Sensors thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default Sensors thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + if (mThreadSchedPolicy == "SCHED_OTHER") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_OTHER, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_BATCH") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_BATCH, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! 
- " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_FIFO") { + sched_param par; + par.sched_priority = mThreadPrioSens; + if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_RR") { + sched_param par; + par.sched_priority = mThreadPrioSens; + if (pthread_setschedparam(pthread_self(), SCHED_RR, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else { + RCLCPP_WARN_STREAM( + get_logger(), + " ! Failed to set thread params! - Policy not supported"); + } + + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New Sensors thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + } + // <---- Advanced thread settings + + while (1) { + try { + if (!rclcpp::ok()) { + DEBUG_STREAM_SENS("Ctrl+C received: stopping sensors thread"); + mThreadStop = true; + break; + } + if (mThreadStop) { + DEBUG_STREAM_SENS( + "threadFunc_pubSensorsData (2): Sensors thread stopped"); + break; + } + + if (mSvoMode && mSvoPause) { + if (!mGrabImuOnce) { + rclcpp::sleep_for(100ms); + continue; + } else { + mGrabImuOnce = false; // Reset the flag and grab the IMU data + } + } + + // std::lock_guard lock(mCloseZedMutex); + if (!mZed->isOpened()) { + DEBUG_STREAM_SENS( + "threadFunc_pubSensorsData: the camera is not open"); + continue; + } + + if (!publishSensorsData()) { + auto sleep_usec = + static_cast(mSensRateComp * (1000000. 
/ mSensPubRate)); + sleep_usec = std::max(100, sleep_usec); + DEBUG_STREAM_SENS( + "[threadFunc_pubSensorsData] Thread sleep: " + << sleep_usec << " µsec"); + rclcpp::sleep_for( + std::chrono::microseconds(sleep_usec)); // Avoid busy-waiting + continue; + } + + // ----> Check publishing frequency + double sens_period_usec = 1e6 / mSensPubRate; + double avg_freq = 1. / mImuPeriodMean_sec->getAvg(); + + double err = std::fabs(mSensPubRate - avg_freq); + + const double COMP_P_GAIN = 0.0005; + + if (avg_freq < mSensPubRate) { + mSensRateComp -= COMP_P_GAIN * err; + } else if (avg_freq > mSensPubRate) { + mSensRateComp += COMP_P_GAIN * err; + } + + mSensRateComp = std::max(0.001, mSensRateComp); + mSensRateComp = std::min(3.0, mSensRateComp); + DEBUG_STREAM_SENS( + "[threadFunc_pubSensorsData] mSensRateComp: " << mSensRateComp); + // <---- Check publishing frequency + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_COMM("threadFunc_pubSensorsData: Generic exception."); + continue; + } + } + + DEBUG_STREAM_SENS("Sensors thread finished"); +} + +void ZedCamera::processOdometry() +{ + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + + if (!mSensor2BaseTransfValid) { + getSens2BaseTransform(); + } + + if (!mSensor2CameraTransfValid) { + getSens2CameraTransform(); + } + + if (!mCamera2BaseTransfValid) { + getCamera2BaseTransform(); + } + + sl::Pose deltaOdom; + linear_base = tf2::Vector3(0.0, 0.0, 0.0); + angular_base = tf2::Vector3(0.0, 0.0, 0.0); + + if (mResetOdomFromSrv || (mResetOdomWhenLoopClosure && + mPosTrackingStatus.spatial_memory_status == + sl::SPATIAL_MEMORY_STATUS::LOOP_CLOSED)) + { + + if (mPosTrackingStatus.spatial_memory_status == + sl::SPATIAL_MEMORY_STATUS::LOOP_CLOSED) + { + DEBUG_PT("=== Odometry reset for LOOP CLOSURE event ==="); + } + + // Propagate Odom transform in time + mOdom2BaseTransf.setIdentity(); + mOdomPath.clear(); + + mResetOdomFromSrv = false; + } else { + if (!mGnssFusionEnabled) { + mZed->getPosition(deltaOdom, 
sl::REFERENCE_FRAME::CAMERA); + } else { + mFusion.getPosition( + deltaOdom, sl::REFERENCE_FRAME::CAMERA /*,mCamUuid*/, + sl::CameraIdentifier(), sl::POSITION_TYPE::FUSION); + } + mLastZedDeltaOdom = deltaOdom; + + DEBUG_STREAM_PT( + "MAP -> Odometry Status: " + << sl::toString(mPosTrackingStatus.odometry_status).c_str()); + + DEBUG_PT( + "delta ODOM %s- [%s]:\n%s", _debugGnss ? "(`sl::Fusion`) " : "", + sl::toString(mPosTrackingStatus.odometry_status).c_str(), + deltaOdom.pose_data.getInfos().c_str()); + + if (_debugGnss) { + sl::Pose camera_delta_odom; + auto status = + mZed->getPosition(camera_delta_odom, sl::REFERENCE_FRAME::CAMERA); + + DEBUG_PT( + "delta ODOM (`sl::Camera`) [%s]:\n%s", + sl::toString(status).c_str(), + camera_delta_odom.pose_data.getInfos().c_str()); + } + + if (mPosTrackingStatus.odometry_status == sl::ODOMETRY_STATUS::OK) { + sl::Translation translation = deltaOdom.getTranslation(); + sl::Orientation quat = deltaOdom.getOrientation(); + + // Transform ZED delta odom pose in TF2 Transformation + tf2::Transform deltaOdomTf; + deltaOdomTf.setOrigin( + tf2::Vector3(translation(0), translation(1), translation(2))); + // w at the end in the constructor + deltaOdomTf.setRotation( + tf2::Quaternion(quat(0), quat(1), quat(2), quat(3))); + + // delta odom from sensor to base frame + tf2::Transform deltaOdomTf_base = + mSensor2BaseTransf.inverse() * deltaOdomTf * mSensor2BaseTransf; + + // Propagate Odom transform in time + mOdom2BaseTransf = mOdom2BaseTransf * deltaOdomTf_base; + + if (mTwoDMode) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + tf2::Vector3 tr_2d = mOdom2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mOdom2BaseTransf.setOrigin(tr_2d); + + double roll, pitch, yaw; + tf2::Matrix3x3(mOdom2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + tf2::Quaternion quat_2d; + quat_2d.setRPY(0.0, 0.0, yaw); + + mOdom2BaseTransf.setRotation(quat_2d); +#else + if (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { + 
tf2::Vector3 tr_2d = mOdom2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mOdom2BaseTransf.setOrigin(tr_2d); + + double roll, pitch, yaw; + tf2::Matrix3x3(mOdom2BaseTransf.getRotation()) + .getRPY(roll, pitch, yaw); + + tf2::Quaternion quat_2d; + quat_2d.setRPY(0.0, 0.0, yaw); + + mOdom2BaseTransf.setRotation(quat_2d); + } else if (fabs(mFixedZValue) > 1e-6) { + tf2::Vector3 tr_2d = mOdom2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mOdom2BaseTransf.setOrigin(tr_2d); + } +#endif + } + + // Transform twist from sensor frame to base frame + // Linear velocity: v_base = R * v_sensor + omega_sensor x r + // Angular velocity: omega_base = R * omega_sensor + tf2::Vector3 linear_sensor(deltaOdom.twist[0], deltaOdom.twist[1], deltaOdom.twist[2]); + tf2::Vector3 angular_sensor(deltaOdom.twist[3], deltaOdom.twist[4], deltaOdom.twist[5]); + + DEBUG_STREAM_PT( + "Delta ODOM Twist - Linear:" << + linear_sensor.x() << "," << linear_sensor.y() << "," << linear_sensor.z() << + " - Angular:" << angular_sensor.x() << "," << angular_sensor.y() << "," << + angular_sensor.z()); + + // Get rotation from sensor to base + tf2::Matrix3x3 rotation_sensor2base(mSensor2BaseTransf.getRotation()); + tf2::Vector3 translation_sensor2base = mSensor2BaseTransf.getOrigin(); + + // Transform angular velocity + angular_base = rotation_sensor2base * angular_sensor; + + // Transform linear velocity: v_base = R * v_sensor + omega_sensor x r + tf2::Vector3 linear_rotated = rotation_sensor2base * linear_sensor; + tf2::Vector3 cross_product = angular_base.cross(translation_sensor2base); + linear_base = linear_rotated + cross_product; + + DEBUG_STREAM_PT( + "Delta ODOM Twist - Transformed Linear:" << + linear_base.x() << "," << linear_base.y() << "," << + linear_base.z() << " - Transformed Angular:" << + angular_base.x() << "," << angular_base.y() << "," << + angular_base.z()); + + mPosTrackingReady = true; + } else if (mFloorAlignment) { + DEBUG_STREAM_THROTTLE_PT( + 5000.0, + "Odometry will 
be published as soon as the floor as " + "been detected for the first time"); + } + } + + if (_debugPosTracking) { + double roll, pitch, yaw; + tf2::Matrix3x3(mOdom2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + DEBUG_PT( + "+++ Odometry [%s -> %s] - {%.3f,%.3f,%.3f} {%.3f,%.3f,%.3f}", + mOdomFrameId.c_str(), mBaseFrameId.c_str(), + mOdom2BaseTransf.getOrigin().x(), mOdom2BaseTransf.getOrigin().y(), + mOdom2BaseTransf.getOrigin().z(), roll * RAD2DEG, pitch * RAD2DEG, + yaw * RAD2DEG); + } + + // Publish odometry message + publishOdom(mOdom2BaseTransf, deltaOdom, linear_base, angular_base, mFrameTimestamp); +} + +void ZedCamera::publishOdom( + tf2::Transform & odom2baseTransf, sl::Pose & slPose, const tf2::Vector3 & linear_velocity, + const tf2::Vector3 & angular_velocity, + rclcpp::Time t) +{ + size_t odomSub = 0; + + try { + odomSub = count_subscribers(mOdomTopic); // mPubOdom subscribers + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + if (odomSub) { + auto odomMsg = std::make_unique(); + + odomMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : t; + odomMsg->header.frame_id = mOdomFrameId; // frame + odomMsg->child_frame_id = mBaseFrameId; // camera_frame + + // Add all value in odometry message + odomMsg->pose.pose.position.x = odom2baseTransf.getOrigin().x(); + odomMsg->pose.pose.position.y = odom2baseTransf.getOrigin().y(); + odomMsg->pose.pose.position.z = odom2baseTransf.getOrigin().z(); + odomMsg->pose.pose.orientation.x = odom2baseTransf.getRotation().x(); + odomMsg->pose.pose.orientation.y = odom2baseTransf.getRotation().y(); + odomMsg->pose.pose.orientation.z = odom2baseTransf.getRotation().z(); + odomMsg->pose.pose.orientation.w = odom2baseTransf.getRotation().w(); + + // Odometry pose covariance + for (size_t i = 0; i < odomMsg->pose.covariance.size(); i++) { + odomMsg->pose.covariance[i] = + static_cast(slPose.pose_covariance[i]); + + if (mTwoDMode) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + if (i == 14 || i == 21 || i == 28) { + odomMsg->pose.covariance[i] = 1e-9; // Very low covariance if 2D mode + } else if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 13) || (i >= 15 && i <= 16) || + (i >= 18 && i <= 20) || (i == 22) || (i >= 24 && i <= 27)) + { + odomMsg->pose.covariance[i] = 0.0; + } +#else + if (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { + if (i == 14 || i == 21 || i == 28) { + odomMsg->pose.covariance[i] = 1e-9; // Very low covariance if 2D mode + } else if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 13) || (i >= 15 && i <= 16) || + (i >= 18 && i <= 20) || (i == 22) || (i >= 24 && i <= 27)) + { + odomMsg->pose.covariance[i] = 0.0; + } + } +#endif + } + } + + // Odometry twist + if (mTwoDMode) { + odomMsg->twist.twist.linear.x = linear_velocity.x(); + odomMsg->twist.twist.linear.y = linear_velocity.y(); + odomMsg->twist.twist.linear.z = 0.0; + odomMsg->twist.twist.angular.x = 0.0; + odomMsg->twist.twist.angular.y = 0.0; + odomMsg->twist.twist.angular.z = angular_velocity.z(); + } else { + 
odomMsg->twist.twist.linear.x = linear_velocity.x(); + odomMsg->twist.twist.linear.y = linear_velocity.y(); + odomMsg->twist.twist.linear.z = linear_velocity.z(); + odomMsg->twist.twist.angular.x = angular_velocity.x(); + odomMsg->twist.twist.angular.y = angular_velocity.y(); + odomMsg->twist.twist.angular.z = angular_velocity.z(); + } + + // Publish odometry message + DEBUG_STREAM_PT("Publishing ODOM message"); + try { + if (mPubOdom) {mPubOdom->publish(std::move(odomMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::processPose() +{ + if (!mSensor2BaseTransfValid) { + getSens2BaseTransform(); + } + + if (!mSensor2CameraTransfValid) { + getSens2CameraTransform(); + } + + if (!mCamera2BaseTransfValid) { + getCamera2BaseTransform(); + } + + sl::Pose pose; + sl::POSITIONAL_TRACKING_STATE pt_state; + if (!mGnssFusionEnabled && mFusionStatus != sl::FUSION_ERROR_CODE::SUCCESS) { + pt_state = mZed->getPosition(pose, sl::REFERENCE_FRAME::WORLD); + } else { + pt_state = mFusion.getPosition( + pose, sl::REFERENCE_FRAME::WORLD /*,mCamUuid*/, + sl::CameraIdentifier(), + sl::POSITION_TYPE::FUSION); + } + +#ifdef ENABLE_PT_LOCK_CHECK + // ----> Check for locked Positional Tracking + float dist = std::sqrt( + std::pow(pose.getTranslation()(0) - mLastZedPose.getTranslation()(0), 2) + + std::pow(pose.getTranslation()(1) - mLastZedPose.getTranslation()(1), 2) + + std::pow(pose.getTranslation()(2) - mLastZedPose.getTranslation()(2), 2)); + if (dist < 1e-9 && pt_state == sl::POSITIONAL_TRACKING_STATE::OK && + mPosTrackingStatus.spatial_memory_status != sl::SPATIAL_MEMORY_STATUS::LOST) + { + mPoseLocked = true; + mPoseLockCount++; + + if (mPoseLockCount > mCamGrabFrameRate) { // > 1 second + RCLCPP_WARN_STREAM( + get_logger(), + "Pos. Track. 
seems to be locked (pose diff.: " + << dist << " m) since " << mPoseLockCount << " frames - Status: " + << sl::toString(mPosTrackingStatus.spatial_memory_status) + .c_str()); + } + } else { + mPoseLocked = false; + mPoseLockCount = 0; + } + // <---- Check for locked Positional Tracking +#endif + + // Update last pose + mLastZedPose = pose; + + publishPoseStatus(); + publishGnssPoseStatus(); + + sl::Translation translation = mLastZedPose.getTranslation(); + sl::Orientation quat = mLastZedPose.getOrientation(); + + if (quat.sum() == 0) { + return; + } + + DEBUG_STREAM_PT( + "MAP -> Tracking Status: " + << sl::toString(mPosTrackingStatus.spatial_memory_status).c_str()); + + DEBUG_PT( + "Sensor POSE %s- [%s -> %s]:\n%s}", + _debugGnss ? "(`sl::Fusion`) " : "", mLeftCamFrameId.c_str(), + mMapFrameId.c_str(), mLastZedPose.pose_data.getInfos().c_str()); + + if (_debugGnss) { + sl::Pose camera_pose; + mZed->getPosition(camera_pose, sl::REFERENCE_FRAME::WORLD); + + DEBUG_PT( + "Sensor POSE (`sl::Camera`) [%s -> %s]:\n%s", + mLeftCamFrameId.c_str(), mMapFrameId.c_str(), + camera_pose.pose_data.getInfos().c_str()); + } + + if (mPosTrackingStatus.odometry_status == sl::ODOMETRY_STATUS::OK) { + double roll, pitch, yaw; + tf2::Matrix3x3(tf2::Quaternion(quat.ox, quat.oy, quat.oz, quat.ow)) + .getRPY(roll, pitch, yaw); + + tf2::Transform map_to_sens_transf; + map_to_sens_transf.setOrigin( + tf2::Vector3(translation(0), translation(1), translation(2))); + map_to_sens_transf.setRotation( + tf2::Quaternion(quat(0), quat(1), quat(2), quat(3))); + + mMap2BaseTransf = + map_to_sens_transf * mSensor2BaseTransf; // Base position in map frame + + if (mTwoDMode) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + tf2::Vector3 tr_2d = mMap2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mMap2BaseTransf.setOrigin(tr_2d); + + tf2::Matrix3x3(mMap2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + tf2::Quaternion quat_2d; + quat_2d.setRPY(0.0, 0.0, yaw); + + 
mMap2BaseTransf.setRotation(quat_2d); +#else + if (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { + tf2::Vector3 tr_2d = mMap2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mMap2BaseTransf.setOrigin(tr_2d); + + tf2::Matrix3x3(mMap2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + tf2::Quaternion quat_2d; + quat_2d.setRPY(0.0, 0.0, yaw); + + mMap2BaseTransf.setRotation(quat_2d); + } else if (fabs(mFixedZValue) > 1e-6) { + tf2::Vector3 tr_2d = mMap2BaseTransf.getOrigin(); + tr_2d.setZ(mFixedZValue); + mMap2BaseTransf.setOrigin(tr_2d); + } +#endif + } + + // double roll, pitch, yaw; + tf2::Matrix3x3(mMap2BaseTransf.getRotation()).getRPY(roll, pitch, yaw); + + DEBUG_PT( + "=== Base POSE [%s -> %s] - {%.3f,%.3f,%.3f} {%.3f,%.3f,%.3f}", + mMapFrameId.c_str(), mBaseFrameId.c_str(), + mMap2BaseTransf.getOrigin().x(), mMap2BaseTransf.getOrigin().y(), + mMap2BaseTransf.getOrigin().z(), roll * RAD2DEG, pitch * RAD2DEG, + yaw * RAD2DEG); + + // Transformation from map to odometry frame + mMap2OdomTransf = mMap2BaseTransf * mOdom2BaseTransf.inverse(); + + tf2::Matrix3x3(mMap2OdomTransf.getRotation()).getRPY(roll, pitch, yaw); + + DEBUG_PT( + "+++ Diff [%s -> %s] - {%.3f,%.3f,%.3f} {%.3f,%.3f,%.3f}", + mMapFrameId.c_str(), mOdomFrameId.c_str(), + mMap2OdomTransf.getOrigin().x(), mMap2OdomTransf.getOrigin().y(), + mMap2OdomTransf.getOrigin().z(), roll * RAD2DEG, pitch * RAD2DEG, + yaw * RAD2DEG); + + + // Publish Pose message + publishPose(); + if (mPublish3DLandmarks) { + if (mFrameSkipCountLandmarks == 0) { + publishPoseLandmarks(); + mFrameSkipCountLandmarks = mPublishLandmarkSkipFrame; + } else { + mFrameSkipCountLandmarks--; + } + } + mPosTrackingReady = true; + } +} + +void ZedCamera::publishPoseStatus() +{ + size_t statusSub = 0; + + try { + statusSub = + count_subscribers(mPoseStatusTopic); // mPubPoseStatus subscribers + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + if (statusSub > 0) { + auto msg = std::make_unique(); + msg->odometry_status = static_cast(mPosTrackingStatus.odometry_status); + msg->spatial_memory_status = static_cast(mPosTrackingStatus.spatial_memory_status); + + try { + if (mPubPoseStatus) {mPubPoseStatus->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishGnssPoseStatus() +{ + size_t statusSub = 0; + + try { + statusSub = count_subscribers( + mGnssPoseStatusTopic); // mPubGnssPoseStatus subscribers + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + if (statusSub > 0) { + auto msg = std::make_unique(); + + msg->gnss_fusion_status = static_cast(mFusedPosTrackingStatus.gnss_fusion_status); + + try { + if (mPubGnssPoseStatus) {mPubGnssPoseStatus->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishGeoPoseStatus() +{ + size_t statusSub = 0; + + try { + statusSub = count_subscribers( + mGeoPoseStatusTopic); // mPubGnssPoseStatus subscribers + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + if (statusSub > 0) { + auto msg = std::make_unique(); + + msg->gnss_fusion_status = + static_cast(mFusedPosTrackingStatus.gnss_fusion_status); + + try { + if (mPubGeoPoseStatus) {mPubGeoPoseStatus->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishPoseLandmarks() +{ + size_t landmarksSub = 0; + + try { + landmarksSub = + count_subscribers(mPointcloud3DLandmarksTopic); // mPubPoseLandmarks subscribers + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + DEBUG_STREAM_PT( + " * [publishPoseLandmarks] Subscribers to topic '" << mPointcloud3DLandmarksTopic << "': " << + landmarksSub); + + if (landmarksSub > 0) { + std::map map_lm3d; + std::vector map_lm2d; + auto res1 = mZed->getPositionalTrackingLandmarks(map_lm3d); // 3D landmarks (all the landmarks in world coords) + auto res2 = mZed->getPositionalTrackingLandmarks2D(map_lm2d); // 2D landmarks (currently tracked in image) + + if (res1 != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), + " * [publishPoseLandmarks] getPositionalTrackingLandmarks error: " << + sl::toString(res1)); + return; + } + if (res2 != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), + " * [publishPoseLandmarks] getPositionalTrackingLandmarks2D error: " << + sl::toString(res2)); + return; + } + + auto msg = std::make_unique(); + + // Initialize Point Cloud message + // https://github.com/ros/common_msgs/blob/jade-devel/sensor_msgs/include/sensor_msgs/point_cloud2_iterator.h + + int ptsCount = map_lm3d.size(); + + DEBUG_STREAM_PT(" * [publishPoseLandmarks] " << ptsCount << " landmarks to publish"); + + if (ptsCount == 0) { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] No landmarks to publish, skipping..."); + return; + } + + if (mSvoMode) { + msg->header.stamp = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else if (mSimMode) { + if (mUseSimTime) { + msg->header.stamp = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else { + msg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : sl_tools::slTime2Ros( + mMatCloud.timestamp); + } + } else { + msg->header.stamp = mUsePubTimestamps ? get_clock()->now() : sl_tools::slTime2Ros( + mMatCloud.timestamp); + } + + msg->header.frame_id = mMapFrameId; // Set the header values of the ROS message + + int val = 1; + msg->is_bigendian = !(*reinterpret_cast(&val) == 1); + msg->is_dense = true; + + msg->width = ptsCount; + msg->height = 1; + + sensor_msgs::PointCloud2Modifier modifier(*(msg.get())); + modifier.setPointCloud2Fields( + 4, + "x", 1, sensor_msgs::msg::PointField::FLOAT32, + "y", 1, sensor_msgs::msg::PointField::FLOAT32, + "z", 1, sensor_msgs::msg::PointField::FLOAT32, + "rgb", 1, sensor_msgs::msg::PointField::FLOAT32); + + DEBUG_STREAM_PT( + " * [publishPoseLandmarks] PointCloud2 msg - Width: " << msg->width << + " - Height: " << msg->height << + " - PointStep: " << msg->point_step << + " - RowStep: " << msg->row_step << + " - Data size: " << msg->data.size()); + + sensor_msgs::PointCloud2Iterator iter_x(*msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(*msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(*msg, "z"); + sensor_msgs::PointCloud2Iterator iter_r(*msg, "r"); + sensor_msgs::PointCloud2Iterator iter_g(*msg, "g"); + sensor_msgs::PointCloud2Iterator iter_b(*msg, "b"); + sensor_msgs::PointCloud2Iterator iter_a(*msg, "a"); + + for (const auto & landmark : map_lm3d) { + // ----> Set the landmark coordinates + *iter_x = landmark.second.position.x; + *iter_y = landmark.second.position.y; + *iter_z = landmark.second.position.z; + // <---- Set the landmark coordinates + // ----> Set the landmark color (green if tracked in 2D, red if not) + // If a landmark is listed in the 2D landmarks, it means that it is currently tracked, so that we can color it in green + if (std::any_of( + map_lm2d.begin(), map_lm2d.end(), + [&landmark](const sl::Landmark2D & lm2d) {return lm2d.id == landmark.first;})) + { + *iter_r = 63; + *iter_g = 255; + *iter_b = 67; + *iter_a = 255; + } else 
{ + *iter_r = 255; + *iter_g = 100; + *iter_b = 100; + *iter_a = 150; + } + // <---- Set the landmark color (green if tracked in 2D, red if not) + + // Increment iterators + ++iter_x; + ++iter_y; + ++iter_z; + ++iter_r; + ++iter_g; + ++iter_b; + ++iter_a; + } + + // Pointcloud publishing + DEBUG_PT(" * [publishPoseLandmarks] Publishing LANDMARK 3D POINT CLOUD message"); + +#ifdef FOUND_POINT_CLOUD_TRANSPORT + try { + mPub3DLandmarks.publish(std::move(msg)); + } catch (std::system_error & e) { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] Message publishing generic exception"); + } +#else + try { + if (mPub3DLandmarks) { + mPub3DLandmarks->publish(std::move(msg)); + } else { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] Publisher not initialized"); + } + } catch (std::system_error & e) { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_PT(" * [publishPoseLandmarks] Message publishing generic exception"); + } +#endif + } +} + +void ZedCamera::publishPose() +{ + size_t poseSub = 0; + size_t poseCovSub = 0; + + try { + poseSub = count_subscribers(mPoseTopic); // mPubPose subscribers + poseCovSub = count_subscribers(mPoseCovTopic); // mPubPoseCov subscribers + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("publishPose: Exception while counting subscribers"); + return; + } + + tf2::Transform base_pose; + base_pose.setIdentity(); + + base_pose = mMap2BaseTransf; + + std_msgs::msg::Header header; + header.stamp = mUsePubTimestamps ? 
get_clock()->now() : mFrameTimestamp; + header.frame_id = mMapFrameId; // frame + + geometry_msgs::msg::Pose pose; + + // Add all value in Pose message + pose.position.x = mMap2BaseTransf.getOrigin().x(); + pose.position.y = mMap2BaseTransf.getOrigin().y(); + pose.position.z = mMap2BaseTransf.getOrigin().z(); + pose.orientation.x = mMap2BaseTransf.getRotation().x(); + pose.orientation.y = mMap2BaseTransf.getRotation().y(); + pose.orientation.z = mMap2BaseTransf.getRotation().z(); + pose.orientation.w = mMap2BaseTransf.getRotation().w(); + + if (poseSub > 0) { + auto poseNoCov = std::make_unique(); + + poseNoCov->header = header; + poseNoCov->pose = pose; + + // Publish pose stamped message + DEBUG_STREAM_PT("Publishing POSE NO COV message"); + try { + if (mPubPose) {mPubPose->publish(std::move(poseNoCov));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (mPublishPoseCov) { + if (poseCovSub > 0) { + auto poseCov = std::make_unique(); + + poseCov->header = header; + poseCov->pose.pose = pose; + + // Odometry pose covariance if available + + for (size_t i = 0; i < poseCov->pose.covariance.size(); i++) { + poseCov->pose.covariance[i] = + static_cast(mLastZedPose.pose_covariance[i]); + if (mTwoDMode) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 29) || (i >= 32 && i <= 34)) + { + poseCov->pose.covariance[i] = + 1e-9; // Very low covariance if 2D mode + } +#else + if (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { + if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 29) || (i >= 32 && i <= 34)) + { + poseCov->pose.covariance[i] = + 1e-9; // Very low covariance if 2D mode + } + } +#endif + } + } + + // Publish pose with covariance stamped message + DEBUG_STREAM_PT("Publishing POSE COV message"); + try { + if 
(mPubPoseCov) {mPubPoseCov->publish(std::move(poseCov));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + } +} + +void ZedCamera::processGeoPose() +{ + if (!mGnssMsgReceived) { + return; + } + + if (!mGnssFusionEnabled) { + return; + } + + if (!mGnss2BaseTransfValid) { + getGnss2BaseTransform(); + } + + // Update GeoPose + mFusion.getGeoPose(mLastGeoPose); + + // ----> Update GeoPose status + mFusedPosTrackingStatus = mFusion.getFusedPositionalTrackingStatus(); + publishGeoPoseStatus(); + + if (mFusedPosTrackingStatus.gnss_fusion_status != + sl::GNSS_FUSION_STATUS::OK) + { + RCLCPP_INFO_STREAM( + get_logger(), + "GNSS fusion status: " << sl::toString( + mFusedPosTrackingStatus.gnss_fusion_status)); + mGnssInitGood = false; + } + // <---- Update GeoPose status + + // ----> Setup Lat/Long + double altitude = mLastGeoPose.latlng_coordinates.getAltitude(); + if (mGnssZeroAltitude) { + altitude = 0.0; + } + mLastLatLongPose.setCoordinates( + mLastGeoPose.latlng_coordinates.getLatitude(), + mLastGeoPose.latlng_coordinates.getLongitude(), altitude, true); + + mLastHeading = mLastGeoPose.heading; + // <---- Setup Lat/Long + + // Get ECEF + sl::GeoConverter::latlng2ecef(mLastLatLongPose, mLastEcefPose); + + // Get UTM + sl::GeoConverter::latlng2utm(mLastLatLongPose, mLastUtmPose); + + DEBUG_GNSS("Good GNSS localization:"); + DEBUG_GNSS( + " * ECEF: %.6f m, %.6f m, %.6f m", mLastEcefPose.x, + mLastEcefPose.y, mLastEcefPose.z); + DEBUG_GNSS( + " * Lat. 
Long.: %.6f°, %.6f°,%.3f m", mLastLatLongPose.getLatitude(false), + mLastLatLongPose.getLongitude(false), mLastLatLongPose.getAltitude()); + DEBUG_GNSS(" * Heading: %.3f°", mLastHeading * RAD2DEG); + DEBUG_GNSS( + " * UTM: %.5f m, %.5f m, %.5f°, %s", mLastUtmPose.easting, + mLastUtmPose.northing, mLastUtmPose.gamma, + mLastUtmPose.UTMZone.c_str()); + + // If calibration is not good me must update the `utm -> map` transform + if (!mGnssInitGood) { + mInitEcefPose = mLastEcefPose; + mInitUtmPose = mLastUtmPose; + mInitLatLongPose = mLastLatLongPose; + mInitHeading = mLastHeading; + + if (mFusedPosTrackingStatus.gnss_fusion_status == sl::GNSS_FUSION_STATUS::OK) { + mGnssInitGood = true; + } else { + mGnssInitGood = false; + } + + RCLCPP_INFO(get_logger(), "GNSS reference localization initialized"); + + // ----> Create (static) transform UTM to MAP + // Add only Heading to pose + tf2::Quaternion pose_quat_yaw; + pose_quat_yaw.setRPY(0.0, 0.0, mLastHeading); + mLastHeadingQuat = pose_quat_yaw; + + // Set UTM transform + // Get position from ZED SDK UTM pose + mMap2UtmTransf.setOrigin( + tf2::Vector3( + mLastUtmPose.easting, mLastUtmPose.northing, + mLastGeoPose.latlng_coordinates.getAltitude())); + mMap2UtmTransf.setRotation(pose_quat_yaw); + // ----> Create (static) transform UTM to MAP + + mMap2UtmTransfValid = true; + + if (!mUtmAsParent) { + tf2::Transform map2utmInverse = mMap2UtmTransf.inverse(); + + double roll, pitch, yaw; + tf2::Matrix3x3(map2utmInverse.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), " Static transform UTM to MAP [%s -> %s]", + mUtmFrameId.c_str(), mMapFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + map2utmInverse.getOrigin().x(), + map2utmInverse.getOrigin().y(), + map2utmInverse.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", + roll * RAD2DEG, pitch * RAD2DEG, yaw * RAD2DEG); + } else { + double roll, pitch, yaw; + 
tf2::Matrix3x3(mMap2UtmTransf.getRotation()).getRPY(roll, pitch, yaw); + + RCLCPP_INFO( + get_logger(), " Static transform MAP to UTM [%s -> %s]", + mMapFrameId.c_str(), mUtmFrameId.c_str()); + RCLCPP_INFO( + get_logger(), " * Translation: {%.3f,%.3f,%.3f}", + mMap2UtmTransf.getOrigin().x(), + mMap2UtmTransf.getOrigin().y(), + mMap2UtmTransf.getOrigin().z()); + RCLCPP_INFO( + get_logger(), " * Rotation: {%.3f,%.3f,%.3f}", + roll * RAD2DEG, pitch * RAD2DEG, yaw * RAD2DEG); + } + } + + if (mMap2UtmTransfValid && mPublishUtmTf) { + // Publish the transform + // Note: we cannot use a static TF publisher because IPC is enabled + geometry_msgs::msg::TransformStamped transformStamped; + + transformStamped.header.stamp = + mUsePubTimestamps ? + get_clock()->now() : + (mFrameTimestamp + rclcpp::Duration(0, mTfOffset * 1e9)); + transformStamped.header.frame_id = mUtmAsParent ? mUtmFrameId : mMapFrameId; + transformStamped.child_frame_id = mUtmAsParent ? mMapFrameId : mUtmFrameId; + + // conversion from Transform to message + transformStamped.transform = mUtmAsParent ? + (tf2::toMsg(mMap2UtmTransf)) : + (tf2::toMsg(mMap2UtmTransf.inverse())); + + mTfBroadcaster->sendTransform(transformStamped); + + // Debug info + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + transformStamped.transform.rotation.x, + transformStamped.transform.rotation.y, + transformStamped.transform.rotation.z, + transformStamped.transform.rotation.w)) + .getRPY(roll, pitch, yaw); + DEBUG_STREAM_TF( + transformStamped.header.stamp.sec << "." 
<< transformStamped.header.stamp.nanosec + << " - TF [" + << transformStamped.header.frame_id << " -> " + << transformStamped.child_frame_id << "] Position: (" + << transformStamped.transform.translation.x << ", " + << transformStamped.transform.translation.y << ", " + << transformStamped.transform.translation.z + << ") - Orientation RPY: (" << roll * RAD2DEG << ", " + << pitch * RAD2DEG << ", " << yaw * RAD2DEG << ")"); + } + } + + publishGnssPose(); +} + +void ZedCamera::publishGnssPose() +{ + DEBUG_GNSS("=== publishGnssPose ==="); + + size_t gnssSub = 0; + size_t geoPoseSub = 0; + size_t fusedFixSub = 0; + size_t originFixSub = 0; + + try { + gnssSub = count_subscribers(mGnssPoseTopic); + geoPoseSub = count_subscribers(mGeoPoseTopic); + fusedFixSub = count_subscribers(mFusedFixTopic); + originFixSub = count_subscribers(mOriginFixTopic); + } catch (...) { + rcutils_reset_error(); + DEBUG_GNSS("publishGnssPose: Exception while counting subscribers"); + return; + } + + if (gnssSub > 0) { + auto msg = std::make_unique(); + + msg->header.stamp = + mUsePubTimestamps ? 
get_clock()->now() : mFrameTimestamp; + msg->header.frame_id = mMapFrameId; + msg->child_frame_id = mBaseFrameId; + + // Add all value in odometry message + msg->pose.pose.position.x = mMap2BaseTransf.getOrigin().x(); + msg->pose.pose.position.y = mMap2BaseTransf.getOrigin().y(); + msg->pose.pose.position.z = mMap2BaseTransf.getOrigin().z(); + msg->pose.pose.orientation.x = mMap2BaseTransf.getRotation().x(); + msg->pose.pose.orientation.y = mMap2BaseTransf.getRotation().y(); + msg->pose.pose.orientation.z = mMap2BaseTransf.getRotation().z(); + msg->pose.pose.orientation.w = mMap2BaseTransf.getRotation().w(); + + // Odometry pose covariance + for (size_t i = 0; i < msg->pose.covariance.size(); i++) { + msg->pose.covariance[i] = + static_cast(mLastZedPose.pose_covariance[i]); + + if (mTwoDMode) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + if (i == 14 || i == 21 || i == 28) { + msg->pose.covariance[i] = 1e-9; // Very low covariance if 2D mode + } else if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 13) || (i >= 15 && i <= 16) || + (i >= 18 && i <= 20) || (i == 22) || (i >= 24 && i <= 27)) + { + msg->pose.covariance[i] = 0.0; + } +#else + if (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { + if (i == 14 || i == 21 || i == 28) { + msg->pose.covariance[i] = 1e-9; // Very low covariance if 2D mode + } else if ((i >= 2 && i <= 4) || (i >= 8 && i <= 10) || + (i >= 12 && i <= 13) || (i >= 15 && i <= 16) || + (i >= 18 && i <= 20) || (i == 22) || + (i >= 24 && i <= 27)) + { + msg->pose.covariance[i] = 0.0; + } + } +#endif + } + } + + // Publish gnss message + // DEBUG_GNSS("Publishing GNSS pose message"); + try { + if (mPubGnssPose) {mPubGnssPose->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (geoPoseSub > 0) { + auto msg = std::make_unique(); + + msg->header.stamp = + mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + msg->header.frame_id = mMapFrameId; + + // Latest Lat Long data + msg->pose.position.latitude = mLastLatLongPose.getLatitude(false); + msg->pose.position.longitude = mLastLatLongPose.getLongitude(false); + msg->pose.position.altitude = mLastLatLongPose.getAltitude(); + + // Latest Heading Quaternion + msg->pose.orientation.x = mLastHeadingQuat.getX(); + msg->pose.orientation.y = mLastHeadingQuat.getY(); + msg->pose.orientation.z = mLastHeadingQuat.getZ(); + msg->pose.orientation.w = mLastHeadingQuat.getW(); + + // Publish gnss message + // DEBUG_GNSS("Publishing GeoPose message"); + try { + if (mPubGeoPose) {mPubGeoPose->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (fusedFixSub > 0) { + auto msg = std::make_unique(); + + msg->header.stamp = + mUsePubTimestamps ? 
get_clock()->now() : + mFrameTimestamp; + msg->header.frame_id = mMapFrameId; + + msg->status.status = sensor_msgs::msg::NavSatStatus::STATUS_SBAS_FIX; + msg->status.service = mGnssService; + msg->latitude = mLastLatLongPose.getLatitude(false); + msg->longitude = mLastLatLongPose.getLongitude(false); + msg->altitude = mLastLatLongPose.getAltitude(); + + // ----> Covariance + // Extract 3x3 position submatrix from 6x6 row-major pose covariance + // pose_covariance is [tx,ty,tz,rx,ry,rz], we need the upper-left 3x3 + msg->position_covariance_type = + sensor_msgs::msg::NavSatFix::COVARIANCE_TYPE_KNOWN; + for (size_t r = 0; r < 3; r++) { + for (size_t c = 0; c < 3; c++) { + msg->position_covariance[r * 3 + c] = + static_cast(mLastZedPose.pose_covariance[r * 6 + c]); + } + } + + if (mTwoDMode) { + bool apply2d = true; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + if (mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { + apply2d = false; + } +#endif + if (apply2d) { + // Zero out z-related cross terms (row 2 and col 2), set cov(z,z) small + msg->position_covariance[2] = 0.0; // cov(x,z) + msg->position_covariance[5] = 0.0; // cov(y,z) + msg->position_covariance[6] = 0.0; // cov(z,x) + msg->position_covariance[7] = 0.0; // cov(z,y) + msg->position_covariance[8] = 1e-9; // cov(z,z) very low in 2D mode + } + } + // <---- Covariance + + // Publish Fused Fix message + // DEBUG_GNSS("Publishing Fused Fix message");ù + try { + if (mPubFusedFix) {mPubFusedFix->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (originFixSub > 0) { + auto msg = std::make_unique(); + + msg->header.stamp = mUsePubTimestamps ? 
+ get_clock()->now() : + mFrameTimestamp; + msg->header.frame_id = + mGnssOriginFrameId; + + msg->status.status = sensor_msgs::msg::NavSatStatus::STATUS_SBAS_FIX; + msg->status.service = mGnssService; + msg->latitude = mInitLatLongPose.getLatitude(false); + msg->longitude = mInitLatLongPose.getLongitude(false); + msg->altitude = mInitLatLongPose.getAltitude(); + + // ----> Covariance + msg->position_covariance_type = + sensor_msgs::msg::NavSatFix::COVARIANCE_TYPE_UNKNOWN; + for (size_t i = 0; i < msg->position_covariance.size(); i++) { + msg->position_covariance[i] = -1.0; + } + // <---- Covariance + + // Publish Fused Fix message + // DEBUG_GNSS("Publishing Fused Fix message"); + try { + if (mPubOriginFix) {mPubOriginFix->publish(std::move(msg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +bool ZedCamera::isPosTrackingRequired() +{ +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (mDepthDisabled && mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (mDepthDisabled) { +#endif + DEBUG_ONCE_PT("POS. TRACKING not required: Depth disabled (unless GEN3 mode)."); + return false; + } + + if (mPosTrackingEnabled) { + DEBUG_ONCE_PT("POS. TRACKING required: enabled by param."); + return true; + } + + if (mPublishTF) { + DEBUG_ONCE_PT("POS. TRACKING required: enabled by TF param."); + + if (!mPosTrackingEnabled) { + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, but forced to " + "ENABLE because required by `pos_tracking.publish_tf: true`"); + } + return true; + } + + if (mDepthStabilization != 0) { // -1 = SDK default (> 0), so also forces tracking + DEBUG_ONCE_PT( + "POS. 
TRACKING required: enabled by depth stabilization param."); + +#if ((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 52) + if (mDepthStabilization < 0) { + mDepthStabilization = 30; // Set a default value if stabilization is enabled with the SDK default setting + } +#endif + + if (!mPosTrackingEnabled) { + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, but forced to " + "ENABLE because required by `depth.depth_stabilization > 0 or -1 (SDK default)`"); + } + + return true; + } + + if (mMappingEnabled) { + DEBUG_ONCE_PT("POS. TRACKING required: enabled by mapping"); + + if (!mPosTrackingEnabled) { + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, but forced to " + "ENABLE because required by `mapping.mapping_enabled: true`"); + } + + return true; + } + + if (mObjDetEnabled && mObjDetTracking) { + DEBUG_ONCE_PT("POS. TRACKING required: enabled by object detection."); + + if (!mPosTrackingEnabled) { + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, but forced to " + "ENABLE because required by `object_detection.enable_tracking: true`"); + } + + return true; + } + + if (mBodyTrkEnabled && mBodyTrkEnableTracking) { + DEBUG_ONCE_PT("POS. TRACKING required: enabled by body tracking."); + + if (!mPosTrackingEnabled) { + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, but forced to " + "ENABLE because required by `body_tracking.enable_tracking: true`"); + } + + return true; + } + + if (!updatePosTrackingSubscribers()) { + RCLCPP_WARN( + get_logger(), + "isPosTrackingRequired: Exception while counting subscribers"); + return false; + } + + if (mPosTrackingSubCount > 0) { + DEBUG_ONCE_PT("POS. TRACKING required: topic subscribed."); + return true; + } + + if (mZed->isPositionalTrackingEnabled()) { + + DEBUG_ONCE_PT("POS. 
TRACKING required: enabled by ZED SDK."); + + RCLCPP_WARN_ONCE( + get_logger(), + "POSITIONAL TRACKING disabled in the parameters, enabled by the ZED SDK because required by one of the modules."); + + return true; + } + + DEBUG_ONCE_PT("POS. TRACKING not required."); + return false; +} + +bool ZedCamera::updatePosTrackingSubscribers(bool force) +{ + constexpr auto kSubQueryInterval = std::chrono::milliseconds(200); + auto now = std::chrono::steady_clock::now(); + + if (!force && mPosTrackingSubCountInit && + (now - mLastPosTrackingSubCountQuery) < kSubQueryInterval) + { + return true; + } + + mLastPosTrackingSubCountQuery = now; + mPosTrackingSubCountInit = true; + mPosTrackingSubCount = 0; + + try { + if (mPubPose) {mPosTrackingSubCount += count_subscribers(mPubPose->get_topic_name());} + if (mPubPoseCov) {mPosTrackingSubCount += count_subscribers(mPubPoseCov->get_topic_name());} + if (mPubPosePath) {mPosTrackingSubCount += count_subscribers(mPubPosePath->get_topic_name());} + if (mPubOdom) {mPosTrackingSubCount += count_subscribers(mPubOdom->get_topic_name());} + if (mPubOdomPath) {mPosTrackingSubCount += count_subscribers(mPubOdomPath->get_topic_name());} + } catch (...) { + rcutils_reset_error(); + return false; + } + + return true; +} + +void ZedCamera::callback_pubTemp() +{ + DEBUG_STREAM_ONCE_SENS("Temperatures callback called"); + + if (mGrabStatus != sl::ERROR_CODE::SUCCESS && mGrabStatus != sl::ERROR_CODE::CORRUPTED_FRAME) { + DEBUG_SENS("Camera not ready. 
Temperature data not published"); + rclcpp::sleep_for(1s); + return; + } + + + if (sl_tools::isZED(mCamRealModel) || sl_tools::isZEDM(mCamRealModel)) { + DEBUG_SENS( + "callback_pubTemp: the callback should never be called for the ZED " + "or " + "ZEDM camera models!"); + return; + } + + // ----> Always update temperature values for diagnostic + sl::SensorsData sens_data; + sl::ERROR_CODE err = + mZed->getSensorsData(sens_data, sl::TIME_REFERENCE::CURRENT); + if (err != sl::ERROR_CODE::SUCCESS) { + // Only warn if not in SVO mode or if the error is not a benign sensor + // unavailability + if (!mSvoMode || err != sl::ERROR_CODE::SENSORS_NOT_AVAILABLE) { + RCLCPP_WARN_STREAM( + get_logger(), + "[callback_pubTemp] sl::getSensorsData error: " + << sl::toString(err).c_str()); + } + return; + } + + if (sl_tools::isZED2OrZED2i(mCamRealModel)) { + sens_data.temperature.get( + sl::SensorsData::TemperatureData::SENSOR_LOCATION::ONBOARD_LEFT, + mTempLeft); + sens_data.temperature.get( + sl::SensorsData::TemperatureData::SENSOR_LOCATION::ONBOARD_RIGHT, + mTempRight); + } else { + mTempLeft = NOT_VALID_TEMP; + mTempRight = NOT_VALID_TEMP; + } + + sens_data.temperature.get( + sl::SensorsData::TemperatureData::SENSOR_LOCATION::IMU, mTempImu); + // <---- Always update temperature values for diagnostic + + // ----> Subscribers count + size_t tempLeftSubCount = 0; + size_t tempRightSubCount = 0; + size_t tempImuSubCount = 0; + + try { + tempLeftSubCount = 0; + tempRightSubCount = 0; + tempImuSubCount = 0; + + if (sl_tools::isZED2OrZED2i(mCamRealModel)) { + if (mPubTempL) {tempLeftSubCount = count_subscribers(mPubTempL->get_topic_name());} + if (mPubTempR) {tempRightSubCount = count_subscribers(mPubTempR->get_topic_name());} + } + if (mPubImuTemp) {tempImuSubCount = count_subscribers(mPubImuTemp->get_topic_name());} + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_SENS( + "callback_pubTemp: Exception while counting subscribers"); + return; + } + // <---- Subscribers count + + rclcpp::Time now = get_clock()->now(); + + if (tempLeftSubCount > 0) { + auto leftTempMsg = std::make_unique(); + + leftTempMsg->header.stamp = now; + + leftTempMsg->header.frame_id = mTempLeftFrameId; + leftTempMsg->temperature = static_cast(mTempLeft); + leftTempMsg->variance = 0.0; + + try { + if (mPubTempL) {mPubTempL->publish(std::move(leftTempMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (tempRightSubCount > 0) { + auto rightTempMsg = std::make_unique(); + + rightTempMsg->header.stamp = now; + + rightTempMsg->header.frame_id = mTempRightFrameId; + rightTempMsg->temperature = static_cast(mTempRight); + rightTempMsg->variance = 0.0; + + DEBUG_STREAM_SENS("Publishing RIGHT TEMP message"); + try { + if (mPubTempR) {mPubTempR->publish(std::move(rightTempMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (tempImuSubCount > 0) { + auto imuTempMsg = std::make_unique(); + + imuTempMsg->header.stamp = now; + + imuTempMsg->header.frame_id = mImuFrameId; + imuTempMsg->temperature = static_cast(mTempImu); + imuTempMsg->variance = 0.0; + + DEBUG_SENS("Publishing IMU TEMP message"); + try { + if (mPubImuTemp) {mPubImuTemp->publish(std::move(imuTempMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::callback_pubFusedPc() +{ + DEBUG_STREAM_ONCE_MAP("Mapping callback called"); + + auto pointcloudFusedMsg = std::make_unique(); + + uint32_t fusedCloudSubCount = 0; + try { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + fusedCloudSubCount = mPubFusedCloud.getNumSubscribers(); +#else + if (mPubFusedCloud) {fusedCloudSubCount = count_subscribers(mPubFusedCloud->get_topic_name());} +#endif + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_MAP("pubFusedPc: Exception while counting subscribers"); + return; + } + + if (fusedCloudSubCount == 0) { + return; + } + + if (!mZed->isOpened()) { + return; + } + + mZed->requestSpatialMapAsync(); + + while (mZed->getSpatialMapRequestStatusAsync() == sl::ERROR_CODE::FAILURE) { + // Mesh is still generating + rclcpp::sleep_for(1ms); + } + + sl::ERROR_CODE res = mZed->retrieveSpatialMapAsync(mFusedPC); + + if (res != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Fused point cloud not extracted: " + << sl::toString(res).c_str()); + return; + } + + size_t ptsCount = mFusedPC.getNumberOfPoints(); + bool resized = false; + + if (pointcloudFusedMsg->width != ptsCount || + pointcloudFusedMsg->height != 1) + { + // Initialize Point Cloud message + // https://github.com/ros/common_msgs/blob/jade-devel/sensor_msgs/include/sensor_msgs/point_cloud2_iterator.h + pointcloudFusedMsg->header.frame_id = + mMapFrameId; // Set the header values of the ROS message + pointcloudFusedMsg->is_bigendian = false; + pointcloudFusedMsg->is_dense = false; + pointcloudFusedMsg->width = ptsCount; + pointcloudFusedMsg->height = 1; + + sensor_msgs::PointCloud2Modifier modifier(*pointcloudFusedMsg); + modifier.setPointCloud2Fields( + 4, "x", 1, sensor_msgs::msg::PointField::FLOAT32, "y", 1, + sensor_msgs::msg::PointField::FLOAT32, "z", 1, + sensor_msgs::msg::PointField::FLOAT32, "rgb", 1, + sensor_msgs::msg::PointField::FLOAT32); + + resized = true; + } + 
+ float * ptCloudPtr = reinterpret_cast(&pointcloudFusedMsg->data[0]); + int updated = 0; + + for (size_t c = 0; c < mFusedPC.chunks.size(); c++) { + if (mFusedPC.chunks[c].has_been_updated || resized) { + updated++; + size_t chunkSize = mFusedPC.chunks[c].vertices.size(); + + if (chunkSize > 0) { + float * cloud_pts = + reinterpret_cast(mFusedPC.chunks[c].vertices.data()); + memcpy(ptCloudPtr, cloud_pts, 4 * chunkSize * sizeof(float)); + ptCloudPtr += 4 * chunkSize; + if (mSvoMode) { + pointcloudFusedMsg->header.stamp = + mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else if (mSimMode) { + if (mUseSimTime) { + pointcloudFusedMsg->header.stamp = + mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else { + pointcloudFusedMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : + sl_tools::slTime2Ros(mFusedPC.chunks[c].timestamp); + } + } else { + pointcloudFusedMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : + sl_tools::slTime2Ros(mFusedPC.chunks[c].timestamp); + } + } + } + } + + rclcpp::Time ros_ts = get_clock()->now(); + + if (mPrevTs_pc != TIMEZERO_ROS) { + mPubFusedCloudPeriodMean_sec->addValue((ros_ts - mPrevTs_pc).seconds()); + } + mPrevTs_pc = ros_ts; + + // Pointcloud publishing + DEBUG_STREAM_MAP("Publishing FUSED POINT CLOUD message"); +#ifdef FOUND_POINT_CLOUD_TRANSPORT + try { + mPubFusedCloud.publish(std::move(pointcloudFusedMsg)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } +#else + try { + if (mPubFusedCloud) {mPubFusedCloud->publish(std::move(pointcloudFusedMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } +#endif +} + +void ZedCamera::callback_pubPaths() +{ + uint32_t mapPathSub = 0; + uint32_t odomPathSub = 0; + uint32_t utmPathSub = 0; + + try { + mapPathSub = count_subscribers(mPosePathTopic); + odomPathSub = count_subscribers(mOdomPathTopic); + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_PT("pubPaths: Exception while counting subscribers"); + return; + } + + geometry_msgs::msg::PoseStamped odomPose; + geometry_msgs::msg::PoseStamped mapPose; + geometry_msgs::msg::PoseStamped utmPose; + + odomPose.header.stamp = + mFrameTimestamp + rclcpp::Duration(0, mTfOffset * 1e9); + odomPose.header.frame_id = mMapFrameId; // map_frame + odomPose.pose.position.x = mOdom2BaseTransf.getOrigin().x(); + odomPose.pose.position.y = mOdom2BaseTransf.getOrigin().y(); + odomPose.pose.position.z = mOdom2BaseTransf.getOrigin().z(); + odomPose.pose.orientation.x = mOdom2BaseTransf.getRotation().x(); + odomPose.pose.orientation.y = mOdom2BaseTransf.getRotation().y(); + odomPose.pose.orientation.z = mOdom2BaseTransf.getRotation().z(); + odomPose.pose.orientation.w = mOdom2BaseTransf.getRotation().w(); + + mapPose.header.stamp = + mFrameTimestamp + rclcpp::Duration(0, mTfOffset * 1e9); + mapPose.header.frame_id = mMapFrameId; // map_frame + mapPose.pose.position.x = mMap2BaseTransf.getOrigin().x(); + mapPose.pose.position.y = mMap2BaseTransf.getOrigin().y(); + mapPose.pose.position.z = mMap2BaseTransf.getOrigin().z(); + mapPose.pose.orientation.x = mMap2BaseTransf.getRotation().x(); + mapPose.pose.orientation.y = mMap2BaseTransf.getRotation().y(); + mapPose.pose.orientation.z = mMap2BaseTransf.getRotation().z(); + mapPose.pose.orientation.w = mMap2BaseTransf.getRotation().w(); + + if (mGnssFusionEnabled) { + utmPose.header.stamp = + mFrameTimestamp + rclcpp::Duration(0, mTfOffset * 1e9); + utmPose.header.frame_id = mMapFrameId; // mUtmFrameId; // map_frame + utmPose.pose.position.x = 
mMap2UtmTransf.getOrigin().x(); + utmPose.pose.position.y = mMap2UtmTransf.getOrigin().y(); + utmPose.pose.position.z = mMap2UtmTransf.getOrigin().z(); + utmPose.pose.orientation.x = mMap2UtmTransf.getRotation().x(); + utmPose.pose.orientation.y = mMap2UtmTransf.getRotation().y(); + utmPose.pose.orientation.z = mMap2UtmTransf.getRotation().z(); + utmPose.pose.orientation.w = mMap2UtmTransf.getRotation().w(); + } + + // Circular vector + if (mPathMaxCount != -1) { + if (mOdomPath.size() == mPathMaxCount) { + DEBUG_STREAM_PT("Path vectors full: rotating "); + std::rotate(mOdomPath.begin(), mOdomPath.begin() + 1, mOdomPath.end()); + std::rotate(mPosePath.begin(), mPosePath.begin() + 1, mPosePath.end()); + + mPosePath[mPathMaxCount - 1] = mapPose; + mOdomPath[mPathMaxCount - 1] = odomPose; + } else { + // DEBUG_STREAM_PT( "Path vectors adding last available poses"); + mPosePath.push_back(mapPose); + mOdomPath.push_back(odomPose); + } + } else { + // DEBUG_STREAM_PT( "No limit path vectors, adding last available + // poses"); + mPosePath.push_back(mapPose); + mOdomPath.push_back(odomPose); + } + + if (mapPathSub > 0) { + auto mapPathMsg = std::make_unique(); + mapPathMsg->header.frame_id = mMapFrameId; + mapPathMsg->header.stamp = + mUsePubTimestamps ? get_clock()->now() : + mFrameTimestamp; + mapPathMsg->poses = mPosePath; + + DEBUG_STREAM_PT("Publishing MAP PATH message"); + try { + if (mPubPosePath) {mPubPosePath->publish(std::move(mapPathMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + + if (odomPathSub > 0) { + auto odomPathMsg = std::make_unique(); + odomPathMsg->header.frame_id = mOdomFrameId; + odomPathMsg->header.stamp = + mUsePubTimestamps ? 
get_clock()->now() : + mFrameTimestamp; + odomPathMsg->poses = mOdomPath; + + DEBUG_STREAM_PT("Publishing ODOM PATH message"); + try { + if (mPubOdomPath) {mPubOdomPath->publish(std::move(odomPathMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } +} + +void ZedCamera::callback_enableDepth( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Enable Depth service called **"); + + if (mDepthMode == sl::DEPTH_MODE::NONE) { + RCLCPP_WARN( + get_logger(), + "The node was started with depth mode NONE. Depth cannot be enabled."); + res->message = "Depth mode is NONE"; + res->success = false; + return; + } + + if (req->data) { + RCLCPP_INFO(get_logger(), "Depth processing enabled"); + mDepthDisabled = false; + res->message = "Depth processing enabled"; + } else { + RCLCPP_INFO(get_logger(), "Depth processing disabled"); + mDepthDisabled = true; + res->message = "Depth processing disabled"; + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + if (mDepthDisabled && mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3) { +#else + if (mDepthDisabled) { +#endif + RCLCPP_WARN( + get_logger(), + "Depth disabled: Positional Tracking data will not be processed."); + if (mPublishTF) { + RCLCPP_WARN( + get_logger(), + " * Unpredictable TF behavior may occur when Depth will be re-enabled."); + } + } + + if (mDepthDisabled && mObjDetEnabled) { + RCLCPP_WARN( + get_logger(), + "Depth disabled: Object Detection processing will be disabled."); + } + + if (mDepthDisabled && mBodyTrkEnabled) { + RCLCPP_WARN( + get_logger(), + "Depth disabled: Body Tracking processing will be disabled."); + } + + res->success = true; +} + +void 
ZedCamera::callback_saveAreaMemory( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Save Area Memory service called **"); + + if (!mPosTrackingStarted) { + RCLCPP_WARN(get_logger(), " * Pos. Tracking was not started"); + res->message = "Positional tracking not started"; + res->success = false; + return; + } + + std::string filename = req->area_file_path; + + if (filename.empty()) { + if (mAreaMemoryFilePath.empty()) { + RCLCPP_WARN( + get_logger(), + " * Empty filename and empty 'pos_tracking.area_file_path' parameter."); + res->message = "Empty filename and empty 'pos_tracking.area_file_path' parameter."; + res->success = false; + return; + } + filename = mAreaMemoryFilePath; + } + + filename = sl_tools::getFullFilePath(filename); + + if (!saveAreaMemoryFile(filename) ) { + res->message = "Error saving Area Memory File. Read node log for more information."; + res->success = false; + return; + } + + res->message = "Area Memory File saved"; + res->success = true; +} + +void ZedCamera::callback_resetOdometry( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + (void)req; + + RCLCPP_INFO(get_logger(), "** Reset Odometry service called **"); + mResetOdomFromSrv = true; + res->message = "Odometry reset OK"; + res->success = true; +} + +void ZedCamera::callback_resetPosTracking( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + (void)req; + + RCLCPP_INFO(get_logger(), "** Reset Pos. Tracking service called **"); + + if (!mPosTrackingStarted) { + RCLCPP_WARN(get_logger(), "Pos. 
Tracking was not started"); + res->message = "Positional tracking not active"; + res->success = false; + return; + } + + mResetOdomFromSrv = true; + mOdomPath.clear(); + mPosePath.clear(); + + // Restart tracking + startPosTracking(); + + res->message = "Positional tracking reset OK"; + res->success = true; +} + +void ZedCamera::callback_setPose( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Set Pose service called **"); + + if (mGnssFusionEnabled) { + RCLCPP_WARN( + get_logger(), + "Service call not valid: GNSS fusion is enabled."); + res->message = "GNSS fusion is enabled"; + res->success = false; + return; + } + + RCLCPP_INFO_STREAM( + get_logger(), + "New pose: [" << req->pos[0] << "," << req->pos[1] << "," + << req->pos[2] << ", " << req->orient[0] + << "," << req->orient[1] << "," + << req->orient[2] << "]"); + + if (!mPosTrackingStarted) { + RCLCPP_WARN(get_logger(), "Pos. Tracking was not active"); + res->message = "Positional tracking not active"; + res->success = false; + return; + } + + mInitialBasePose[0] = req->pos[0]; + mInitialBasePose[1] = req->pos[1]; + mInitialBasePose[2] = req->pos[2]; + + mInitialBasePose[3] = req->orient[0]; + mInitialBasePose[4] = req->orient[1]; + mInitialBasePose[5] = req->orient[2]; + + mResetOdomFromSrv = true; + mOdomPath.clear(); + mPosePath.clear(); + + // Restart tracking + startPosTracking(); + + res->message = "Positional Tracking new pose OK"; + res->success = true; +} + +void ZedCamera::callback_enableObjDet( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Enable Object Detection service called **"); + + std::lock_guard lock(mObjDetMutex); + + if (sl_tools::isZED(mCamRealModel)) { + RCLCPP_WARN(get_logger(), "Object Detection not available for ZED"); + res->message = "Object Detection not available for 
ZED"; + res->success = false; + return; + } + + if (req->data) { + RCLCPP_INFO(get_logger(), "Starting Object Detection"); + // Start + if (mObjDetEnabled && mObjDetRunning) { + RCLCPP_WARN(get_logger(), "Object Detection is already running"); + res->message = "Object Detection is already running"; + res->success = false; + return; + } + + mObjDetEnabled = true; + + if (startObjDetect()) { + res->message = "Object Detection started"; + res->success = true; + return; + } else { + res->message = + "Error occurred starting Object Detection. Read the log for more info"; + res->success = false; + return; + } + } else { + RCLCPP_INFO(get_logger(), "Stopping Object Detection"); + // Stop + if (!mObjDetEnabled || !mObjDetRunning) { + RCLCPP_WARN(get_logger(), "Object Detection was not running"); + res->message = "Object Detection was not running"; + res->success = false; + return; + } + + stopObjDetect(); + + res->message = "Object Detection stopped"; + res->success = true; + return; + } +} + +void ZedCamera::callback_enableBodyTrk( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Enable Body Tracking service called **"); + + std::lock_guard lock(mBodyTrkMutex); + + if (sl_tools::isZED(mCamRealModel)) { + RCLCPP_WARN(get_logger(), "Body Tracking not available for ZED"); + res->message = "Body Tracking not available for ZED"; + res->success = false; + return; + } + + if (req->data) { + RCLCPP_INFO(get_logger(), "Starting Body Tracking"); + // Start + if (mBodyTrkEnabled && mBodyTrkRunning) { + RCLCPP_WARN(get_logger(), "Body Tracking is already running"); + res->message = "Body Tracking is already running"; + res->success = false; + return; + } + + mBodyTrkEnabled = true; + + if (startBodyTracking()) { + res->message = "Body Tracking started"; + res->success = true; + return; + } else { + res->message = + "Error occurred starting Body Tracking. 
Read the log for more info"; + res->success = false; + return; + } + } else { + RCLCPP_INFO(get_logger(), "Stopping Body Tracking"); + // Stop + if (!mBodyTrkEnabled || !mBodyTrkRunning) { + RCLCPP_WARN(get_logger(), "Body Tracking was not running"); + res->message = "Body Tracking was not running"; + res->success = false; + return; + } + + stopBodyTracking(); + + res->message = "Body Tracking stopped"; + res->success = true; + return; + } +} + +void ZedCamera::callback_enableMapping( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Enable Spatial Mapping service called **"); + + std::lock_guard lock(mMappingMutex); + + if (req->data) { + RCLCPP_INFO(get_logger(), "Starting Spatial Mapping"); + // Start + if (mMappingEnabled && mSpatialMappingRunning) { + RCLCPP_WARN(get_logger(), "Spatial Mapping is already running"); + res->message = "Spatial Mapping is already running"; + res->success = false; + return; + } + + mMappingEnabled = true; + + if (start3dMapping()) { + res->message = "Spatial Mapping started"; + res->success = true; + return; + } else { + res->message = + "Error occurred starting Spatial Mapping. 
Read the log for more info"; + res->success = false; + return; + } + } else { + RCLCPP_INFO(get_logger(), "Stopping Spatial Mapping"); + // Stop + if (!mMappingEnabled || !mSpatialMappingRunning) { + mMappingEnabled = false; + mSpatialMappingRunning = false; + + RCLCPP_WARN(get_logger(), "Spatial Mapping was not running"); + res->message = "Spatial Mapping was not running"; + res->success = false; + + return; + } + + stop3dMapping(); + + res->message = "Spatial Mapping stopped"; + res->success = true; + return; + } +} + +void ZedCamera::callback_enableStreaming( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + if (req->data) { + + if (mStreamingServerRunning) { + RCLCPP_WARN(get_logger(), "A Streaming Server is already running"); + res->message = "A Streaming Server is already running"; + res->success = false; + return; + } + // Start + if (startStreamingServer()) { + res->message = "Streaming Server started"; + res->success = true; + return; + } else { + res->message = + "Error occurred starting the Streaming Server. 
Read the log for more info"; + res->success = false; + return; + } + } else { + // Stop + if (!mStreamingServerRunning) { + RCLCPP_WARN(get_logger(), "There is no Streaming Server active to be stopped"); + res->message = "There is no Streaming Server active to be stopped"; + res->success = false; + return; + } + + RCLCPP_INFO(get_logger(), "Stopping the Streaming Server"); + stopStreamingServer(); + + res->message = "Streaming Server stopped"; + res->success = true; + return; + } +} + +void ZedCamera::callback_startSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Start SVO Recording service called **"); + + if (mSvoMode) { + RCLCPP_WARN( + get_logger(), + "Cannot start SVO recording while playing SVO as input"); + res->message = "Cannot start SVO recording while playing SVO as input"; + res->success = false; + return; + } + + std::lock_guard lock(mRecMutex); + + if (mRecording) { + RCLCPP_WARN(get_logger(), "SVO Recording is already enabled"); + res->message = "SVO Recording is already enabled"; + res->success = false; + return; + } + + mSvoRecBitrate = req->bitrate; + if ((mSvoRecBitrate != 0) && + ((mSvoRecBitrate < 1000) || (mSvoRecBitrate > 60000))) + { + RCLCPP_WARN( + get_logger(), + "'bitrate' value not valid. Please use a value " + "in range [1000,60000], or 0 for default"); + res->message = + "'bitrate' value not valid. Please use a value in range " + "[1000,60000], or 0 for default"; + res->success = false; + return; + } + + if (req->compression_mode < 0 || req->compression_mode > 5) { + RCLCPP_WARN( + get_logger(), + "'compression_mode' mode not valid. Please use a value in " + "range [0,5]"); + res->message = + "'compression_mode' mode not valid. 
Please use a value in range " + "[0,5]"; + res->success = false; + return; + } + switch (req->compression_mode) { + case 1: + mSvoRecCompression = sl::SVO_COMPRESSION_MODE::H264; + break; + case 3: + mSvoRecCompression = sl::SVO_COMPRESSION_MODE::H264_LOSSLESS; + break; + case 4: + mSvoRecCompression = sl::SVO_COMPRESSION_MODE::H265_LOSSLESS; + break; + case 5: + mSvoRecCompression = sl::SVO_COMPRESSION_MODE::LOSSLESS; + break; + default: + mSvoRecCompression = sl::SVO_COMPRESSION_MODE::H265; + break; + } + mSvoRecFramerate = req->target_framerate; + mSvoRecTranscode = req->input_transcode; + mSvoRecFilename = req->svo_filename; + + if (mSvoRecFilename.empty()) { + mSvoRecFilename = "zed.svo2"; + } + + std::string err; + + if (!startSvoRecording(err)) { + res->message = "Error starting SVO recording: " + err; + res->success = false; + return; + } + + RCLCPP_INFO(get_logger(), "SVO Recording started: "); + RCLCPP_INFO_STREAM(get_logger(), " * Bitrate: " << mSvoRecBitrate); + RCLCPP_INFO_STREAM( + get_logger(), " * Compression mode: " << sl::toString( + mSvoRecCompression).c_str()); + RCLCPP_INFO_STREAM(get_logger(), " * Framerate: " << mSvoRecFramerate); + RCLCPP_INFO_STREAM( + get_logger(), + " * Input Transcode: " << (mSvoRecTranscode ? "TRUE" : "FALSE")); + RCLCPP_INFO_STREAM( + get_logger(), + " * Filename: " << (mSvoRecFilename.empty() ? 
"zed.svo2" : + mSvoRecFilename)); + + res->message = "SVO Recording started"; + res->success = true; +} + +void ZedCamera::callback_stopSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + (void)req; + + RCLCPP_INFO(get_logger(), "** Stop SVO Recording service called **"); + + std::lock_guard lock(mRecMutex); + + if (!mRecording) { + RCLCPP_WARN(get_logger(), "SVO Recording is NOT enabled"); + res->message = "SVO Recording is NOT enabled"; + res->success = false; + return; + } + + stopSvoRecording(); + + RCLCPP_INFO(get_logger(), "SVO Recording stopped"); + res->message = "SVO Recording stopped"; + res->success = true; +} + + +void ZedCamera::callback_pauseSvoInput( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Pause SVO Input service called **"); + + std::lock_guard lock(mRecMutex); + + if (!mSvoMode) { + RCLCPP_WARN(get_logger(), "The node is not using an SVO as input"); + res->message = "The node is not using an SVO as input"; + res->success = false; + return; + } + +#ifndef USE_SVO_REALTIME_PAUSE + if (mSvoRealtime) { + RCLCPP_WARN( + get_logger(), + "SVO input can be paused only if SVO is not in RealTime mode"); + res->message = + "SVO input can be paused only if SVO is not in RealTime mode"; + res->success = false; + mSvoPause = false; + return; + } +#endif + + if (!mSvoPause) { + RCLCPP_WARN(get_logger(), "SVO is paused"); + res->message = "SVO is paused"; + mSvoPause = true; + } else { + RCLCPP_WARN(get_logger(), "SVO is playing"); + res->message = "SVO is playing"; + mSvoPause = false; + } + res->success = true; + +#ifdef USE_SVO_REALTIME_PAUSE + mZed->pauseSVOReading(mSvoPause); +#endif + +} + +void ZedCamera::callback_setSvoFrame( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + 
RCLCPP_INFO(get_logger(), "** Set SVO Frame service called **"); + + constexpr double SVO_FRAME_SET_MIN_INTERVAL = + 0.5; // Minimum interval in seconds between frame changes to prevent + // excessive seeking + + // ----> Check service call frequency + if (mSetSvoFrameCheckTimer.toc() < SVO_FRAME_SET_MIN_INTERVAL) { + RCLCPP_WARN(get_logger(), "SVO frame set too fast"); + res->message = "SVO frame set too fast"; + res->success = false; + return; + } + mSetSvoFrameCheckTimer.tic(); + // <---- Check service call frequency + + std::lock_guard lock(mRecMutex); + + if (!mSvoMode) { + RCLCPP_WARN(get_logger(), "The node is not using an SVO as input"); + res->message = "The node is not using an SVO as input"; + res->success = false; + return; + } + + int frame = req->frame_id; + int svo_frames = mZed->getSVONumberOfFrames(); + if (frame >= svo_frames) { + std::stringstream ss; + ss << "Frame number is out of range. SVO has " << svo_frames << " frames"; + RCLCPP_WARN(get_logger(), ss.str().c_str()); + res->message = ss.str(); + res->success = false; + return; + } + + mZed->setSVOPosition(frame); + RCLCPP_INFO_STREAM(get_logger(), "SVO frame set to " << frame); + res->message = "SVO frame set to " + std::to_string(frame); + + if (isPosTrackingRequired()) { + // Reset odometry and paths + // ----> Set camera pose to identity + RCLCPP_WARN(get_logger(), " * Camera pose reset to identity."); + mInitialBasePose[0] = 0.0; + mInitialBasePose[1] = 0.0; + mInitialBasePose[2] = 0.0; + + mInitialBasePose[3] = 0.0; + mInitialBasePose[4] = 0.0; + mInitialBasePose[5] = 0.0; + + mResetOdomFromSrv = true; + mOdomPath.clear(); + mPosePath.clear(); + + // Restart tracking + startPosTracking(); + // <---- Set camera pose to identity + } + + //if svo is paused, ensure one grab can update topics + if (mSvoPause) { + mGrabOnce = true; + mGrabImuOnce = true; + } + + res->success = true; +} + +void ZedCamera::callback_updateDiagnostic( + diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + 
DEBUG_COMM("=== Update Diagnostic ==="); + + std::lock_guard lock(mCloseCameraMutex); + + if (mZed == nullptr) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::ERROR, + "Camera not opened"); + return; + } + + if (mConnStatus != sl::ERROR_CODE::SUCCESS) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::ERROR, + sl::toString(mConnStatus).c_str()); + return; + } + + if (mPoseLocked && (mPoseLockCount > mCamGrabFrameRate)) { // > 1 second + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "Positional Tracking locked. Call the service 'reset_pos_tracking' to reset."); + } + + stat.addf("Uptime", "%s", sl_tools::seconds2str(mUptimer.toc()).c_str()); + + if (mGrabStatus == sl::ERROR_CODE::SUCCESS || mGrabStatus == sl::ERROR_CODE::CORRUPTED_FRAME) { + stat.addf("IPC Enabled", "%s", mUsingIPC ? "YES" : "NO"); + + + double freq = 1. / mGrabPeriodMean_sec->getAvg(); + double freq_perc = 0.0; + if (mGrabComputeCappingFps > 0.0 && + mGrabComputeCappingFps < mCamGrabFrameRate) + { + stat.addf("Camera Grab rate -capped-", "%.1f Hz", mGrabComputeCappingFps); + freq_perc = 100. * freq / mGrabComputeCappingFps; + stat.addf( + "Data Capture -capped-", "%.1f Hz (%.1f%%)", freq, freq_perc); + } else { + stat.addf("Camera Grab rate", "%d Hz", mCamGrabFrameRate); + freq_perc = 100. * freq / mCamGrabFrameRate; + stat.addf( + "Data Capture", "Mean Frequency: %.1f Hz (%.1f%%)", freq, + freq_perc); + } + + double frame_proc_sec = mElabPeriodMean_sec->getAvg(); + double frame_grab_period = 1. / mCamGrabFrameRate; + stat.addf( + "Data Capture", "Tot. Processing Time: %.6f sec (Max. %.3f sec)", + frame_proc_sec, frame_grab_period); + + if (frame_proc_sec > frame_grab_period) { + mSysOverloadCount++; + } + + if (mSysOverloadCount >= 10) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "System overloaded. 
Consider reducing " + "'general.pub_frame_rate' or 'general.grab_resolution'"); + } else { + mSysOverloadCount = 0; + + if (!mPoseLocked) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::OK, + "Camera grabbing"); + } + } + + // ----> Frame drop count + auto dropped = mZed->getFrameDroppedCount(); + uint64_t total = dropped + mFrameCount; + auto perc_drop = 100. * static_cast(dropped) / total; + stat.addf( + "Frame Drop rate", "%u/%lu (%g%%)", + dropped, total, perc_drop); + // <---- Frame drop count + + if (mSimMode) { + stat.add("Input mode", "SIMULATION"); + } else if (mSvoMode) { + stat.add("Input mode", "SVO"); + } else if (mStreamMode) { + stat.add("Input mode", "LOCAL STREAM"); + } else { + stat.add("Input mode", "Live Camera"); + } + + if (mVdPublishing) { + if (mSvoMode && !mSvoRealtime) { + freq = 1. / mGrabPeriodMean_sec->getAvg(); + freq_perc = 100. * freq / mVdPubRate; + stat.addf( + "Video/Depth", "Mean Frequency: %.1f Hz (%.1f%%)", freq, + freq_perc); + } else { + freq = 1. / mVideoDepthPeriodMean_sec->getAvg(); + freq_perc = 100. * freq / mVdPubRate; + frame_grab_period = 1. / mVdPubRate; + stat.addf( + "Video/Depth", "Mean Frequency: %.1f Hz (%.1f%%)", freq, + freq_perc); + } + stat.addf( + "Video/Depth", "Processing Time: %.6f sec (Max. %.3f sec)", + mVideoDepthElabMean_sec->getAvg(), frame_grab_period); + } else { + stat.add("Video/Depth", "Topic not subscribed"); + } + + if (mSvoMode) { + double svo_perc = 100. * (static_cast(mSvoFrameId) / mSvoFrameCount); + + stat.addf( + "Playing SVO", "%sFrame: %d/%d (%.1f%%)", + (mSvoPause ? "PAUSED - " : ""), mSvoFrameId, mSvoFrameCount, svo_perc); + stat.addf("SVO Loop", "%s", (mSvoLoop ? "ON" : "OFF")); + if (mSvoLoop) { + stat.addf("SVO Loop Count", "%d", mSvoLoopCount); + } + stat.addf("SVO Real Time mode", "%s", (mSvoRealtime ? 
"ON" : "OFF")); + if (!mSvoRealtime) { + stat.addf("SVO Playback rate", "%.1fx -> %.1f Hz", mSvoRate, mSvoRate * mCamGrabFrameRate); + } + } + + if (!mDepthDisabled) { + stat.add("Depth status", "ACTIVE"); + stat.add("Depth mode", sl::toString(mDepthMode).c_str()); + + if (mPcPublishing) { + freq = 1. / mPcPeriodMean_sec->getAvg(); + freq_perc = 100. * freq / mPcPubRate; + stat.addf( + "Point Cloud", "Mean Frequency: %.1f Hz (%.1f%%)", freq, + freq_perc); + stat.addf( + "Point Cloud", "Processing Time: %.3f sec (Max. %.3f sec)", + mPcProcMean_sec->getAvg(), 1. / mPcPubRate); + } else { + stat.add("Point Cloud", "Topic not subscribed"); + } + + if (mFloorAlignment) { + if (mPosTrackingStatus.spatial_memory_status == sl::SPATIAL_MEMORY_STATUS::SEARCHING) { + stat.add("Floor Detection", "NOT INITIALIZED"); + } else { + stat.add("Floor Detection", "INITIALIZED"); + } + } + + if (mAutoRoiEnabled) { + stat.add("Automatic ROI", sl::toString(mAutoRoiStatus).c_str()); + } else if (mManualRoiEnabled) { + stat.add("Manual ROI", "ENABLED"); + } + + if (mObjDetRunning) { + if (mObjDetSubscribed) { + freq = 1. / mObjDetPeriodMean_sec->getAvg(); + freq_perc = 100. * freq / mVdPubRate; + frame_grab_period = 1. / mVdPubRate; + stat.addf( + "Object detection", "Mean Frequency: %.3f Hz (%.1f%%)", + freq, freq_perc); + stat.addf( + "Object detection", + "Processing Time: %.3f sec (Max. %.3f sec)", + mObjDetElabMean_sec->getAvg(), frame_grab_period); + } else { + stat.add("Object Detection", "Active, topic not subscribed"); + } + } else { + stat.add("Object Detection", "INACTIVE"); + } + + if (mBodyTrkRunning) { + if (mBodyTrkSubscribed) { + freq = 1. / mBodyTrkPeriodMean_sec->getAvg(); + freq_perc = 100. * freq / mVdPubRate; + frame_grab_period = 1. / mVdPubRate; + stat.addf( + "Body Tracking", "Mean Frequency: %.3f Hz (%.1f%%)", + freq, freq_perc); + stat.addf( + "Body Tracking", + "Processing Time: %.3f sec (Max. 
%.3f sec)", + mBodyTrkElabMean_sec->getAvg(), frame_grab_period); + } else { + stat.add("Body Tracking", "Active, topic not subscribed"); + } + } else { + stat.add("Body Tracking", "INACTIVE"); + } + } else { + stat.add("Depth status", "INACTIVE"); + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + if (!mDepthDisabled || mPosTrkMode == sl::POSITIONAL_TRACKING_MODE::GEN_3) { // With ZED SDK v5.2 we can use `GEN_3` even if depth is disabled +#else + if (!mDepthDisabled) { +#endif + stat.addf( + "Positional Tracking mode", "%s", + sl::toString(mPosTrkMode).c_str()); + + if (mPosTrackingStarted) { + stat.addf( + "Odometry status", "%s", + sl::toString(mPosTrackingStatus.odometry_status).c_str()); + stat.addf( + "Spatial Memory status", "%s", + sl::toString(mPosTrackingStatus.spatial_memory_status).c_str()); + stat.addf( + "Tracking Fusion status", "%s", + sl::toString(mPosTrackingStatus.tracking_fusion_status).c_str()); + + if (mPublishTF) { + freq = 1. / mPubOdomTF_sec->getAvg(); + stat.addf("TF Odometry", "Mean Frequency: %.1f Hz", freq); + + if (mPublishMapTF) { + freq = 1. / mPubPoseTF_sec->getAvg(); + stat.addf("TF Pose", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("TF Pose", "DISABLED"); + } + } else { + stat.add("TF Odometry", "DISABLED"); + stat.add("TF Pose", "DISABLED"); + } + + if (mMappingEnabled) { + if (mSpatialMappingRunning) { + stat.add("3D Mapping", "ACTIVE"); + } else { + stat.add("3D Mapping", "STARTING..."); + } + } else { + stat.add("3D Mapping", "INACTIVE"); + } + } else { + stat.add("Pos. Tracking status", "INACTIVE"); + } + + if (mGnssFusionEnabled) { + stat.addf("Fusion status", sl::toString(mFusionStatus).c_str()); + if (mPosTrackingStarted) { + stat.addf( + "GNSS Tracking status", "%s", + sl::toString(mFusedPosTrackingStatus.gnss_fusion_status).c_str()); + } + if (mGnssMsgReceived) { + freq = 1. 
/ mGnssFix_sec->getAvg(); + stat.addf("GNSS input", "Mean Frequency: %.1f Hz", freq); + stat.addf("GNSS Services", "%s", mGnssServiceStr.c_str()); + if (mGnssFixValid) { + stat.add("GNSS Status", "FIX OK"); + } else { + stat.add("GNSS Status", "NO FIX"); + } + } else { + stat.add("GNSS Fusion", "Waiting for GNSS messages"); + } + } else { + stat.add("GNSS Fusion", "DISABLED"); + } + } else { + stat.add("Positional Tracking", "N/A"); + } + + if (mPublishImuTF) { + freq = 1. / mPubImuTF_sec->getAvg(); + stat.addf("TF IMU", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("TF IMU", "DISABLED"); + } + + if (mGrabStatus == sl::ERROR_CODE::CORRUPTED_FRAME) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "Performance Degraded - Corrupted frame received"); + } + } else if (mGrabStatus == sl::ERROR_CODE::LAST) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::OK, + "Camera initializing"); + } else { + stat.summaryf( + diagnostic_msgs::msg::DiagnosticStatus::ERROR, + "%s", sl::toString(mGrabStatus).c_str()); + } + + if (mImuPublishing) { + double freq = 1. / mImuPeriodMean_sec->getAvg(); + stat.addf("IMU", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("IMU Sensor", "Topics not subscribed"); + } + + if (!mSvoMode && !mSimMode && sl_tools::isZED2OrZED2i(mCamRealModel)) { + if (mMagPublishing) { + double freq = 1. / mMagPeriodMean_sec->getAvg(); + stat.addf("Magnetometer", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("Magnetometer Sensor", "Topics not subscribed"); + } + } else { + stat.add("Magnetometer Sensor", "N/A"); + } + + if (!mSvoMode && !mSimMode && sl_tools::isZED2OrZED2i(mCamRealModel)) { + if (mBaroPublishing) { + double freq = 1. 
/ mBaroPeriodMean_sec->getAvg(); + stat.addf("Barometer", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("Barometer Sensor", "Topics not subscribed"); + } + } else { + stat.add("Barometer Sensor", "N/A"); + } + + if (!mSvoMode && !mSimMode && sl_tools::isZED2OrZED2i(mCamRealModel)) { + stat.addf("Left CMOS Temp.", "%.1f °C", mTempLeft); + stat.addf("Right CMOS Temp.", "%.1f °C", mTempRight); + + if (mTempLeft > 70.f || mTempRight > 70.f) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "High Camera temperature"); + } + } else { + stat.add("Left CMOS Temp.", "N/A"); + stat.add("Right CMOS Temp.", "N/A"); + } + + if (!mSvoMode && !mSimMode && sl_tools::isZEDX(mCamRealModel)) { + stat.addf("Camera Temp.", "%.1f °C", mTempImu); + + if (mTempImu > 70.f) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "High Camera temperature"); + } + } + + if (mRecording) { + if (!mRecStatus.status) { + // if (mGrabActive) + { + stat.add("SVO Recording", "ERROR"); + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "Error adding frames to SVO file while recording. " + "Check " + "free disk space"); + } + } else { + stat.add("SVO Recording", "ACTIVE"); + stat.addf( + "SVO compression time", "%g msec", + mRecStatus.average_compression_time); + stat.addf( + "SVO compression ratio", "%.1f%%", + mRecStatus.average_compression_ratio); + } + } else { + stat.add("SVO Recording", "NOT ACTIVE"); + } + + if (mStreamingServerRunning) { + stat.add("Streaming Server", "ACTIVE"); + + sl::StreamingParameters params; + params = mZed->getStreamingParameters(); + + stat.addf("Streaming port", "%d", static_cast(params.port)); + stat.addf( + "Streaming codec", "%s", + (params.codec == sl::STREAMING_CODEC::H264 ? 
"H264" : "H265")); + stat.addf("Streaming bitrate", "%d mbps", static_cast(params.bitrate)); + stat.addf("Streaming chunk size", "%d B", static_cast(params.chunk_size)); + stat.addf("Streaming GOP size", "%d", static_cast(params.gop_size)); + } else { + stat.add("Streaming Server", "NOT ACTIVE"); + } +} + +void ZedCamera::callback_gnssPubTimerTimeout() +{ + if (!mGnssMsgReceived) { + return; + } + + mGnssMsgReceived = false; + + RCLCPP_WARN( + get_logger(), + "* GNSS subscriber timeout. No GNSS data available."); + + mGnssPubCheckTimer.reset(); +} + +void ZedCamera::processSvoGnssData() +{ + if (!mGnssReplay) { + mGnssFixValid = false; + return; + } + uint64_t current_ts = mLastZedPose.timestamp.getNanoseconds(); + static uint64_t old_gnss_ts = 0; + + if (current_ts == old_gnss_ts) { + return; + } + old_gnss_ts = current_ts; + + // DEBUG_STREAM_GNSS("Retrieving GNSS data from SVO. TS: " << current_ts + // << " nsec"); + + sl::GNSSData gnssData; + auto err = mGnssReplay->grab(gnssData, current_ts); + if (err != sl::FUSION_ERROR_CODE::SUCCESS) { + //DEBUG_STREAM_GNSS("Error grabbing GNSS data from SVO: " << sl::toString(err)); + return; + } + + mGnssMsgReceived = true; + + // ----> GNSS Fix stats + double elapsed_sec = mGnssFixFreqTimer.toc(); + mGnssFix_sec->addValue(elapsed_sec); + mGnssFixFreqTimer.tic(); + // <---- GNSS Fix stats + + double latitude; + double longitude; + double altitude; + + gnssData.getCoordinates(latitude, longitude, altitude, false); + mGnssTimestamp = rclcpp::Time(gnssData.ts.getNanoseconds(), RCL_ROS_TIME); + + DEBUG_STREAM_GNSS( + "Latest GNSS data from SVO - [" + << mGnssTimestamp.nanoseconds() << " nsec] " << latitude + << "°," << longitude << "° / " << altitude << " m"); + + std::stringstream ss; + for (size_t i = 0; i < gnssData.position_covariance.size(); i++) { + ss << gnssData.position_covariance[i]; + if (i != gnssData.position_covariance.size() - 1) {ss << ", ";} + } + DEBUG_STREAM_GNSS("Covariance- [" << ss.str() << "]"); + 
DEBUG_STREAM_GNSS("GNSS Status: " << sl::toString(gnssData.gnss_status)); + + if (gnssData.gnss_status == sl::GNSS_STATUS::UNKNOWN) { + gnssData.gnss_status = sl::GNSS_STATUS::SINGLE; + DEBUG_STREAM_GNSS(" * Forced to: " << sl::toString(gnssData.gnss_status)); + } + + DEBUG_STREAM_GNSS("GNSS Mode: " << sl::toString(gnssData.gnss_mode)); + + mGnssFixValid = true; // Used to keep track of signal loss + + if (mZed->isOpened() && mZed->isPositionalTrackingEnabled()) { + auto ingest_error = mFusion.ingestGNSSData(gnssData); + if (ingest_error == sl::FUSION_ERROR_CODE::SUCCESS) { + DEBUG_STREAM_GNSS( + "Datum ingested - [" + << mGnssTimestamp.nanoseconds() << " nsec] " << latitude + << "°," << longitude << "° / " << altitude << " m"); + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "Ingest error occurred when ingesting GNSSData: " + << sl::toString(ingest_error)); + } + mGnssFixNew = true; + } +} + +void ZedCamera::callback_gnssFix(const sensor_msgs::msg::NavSatFix::SharedPtr msg) +{ + sl::GNSSData gnssData; + + // ----> GNSS Fix stats + double elapsed_sec = mGnssFixFreqTimer.toc(); + mGnssFix_sec->addValue(elapsed_sec); + mGnssFixFreqTimer.tic(); + // <---- GNSS Fix stats + + if (!mGnssPubCheckTimer) { + std::chrono::milliseconds gnssTimeout_msec(static_cast(2000.0)); + mGnssPubCheckTimer = create_wall_timer( + std::chrono::duration_cast( + gnssTimeout_msec), + std::bind(&ZedCamera::callback_gnssPubTimerTimeout, this)); + } else { + mGnssPubCheckTimer->reset(); + } + + mGnssMsgReceived = true; + + RCLCPP_INFO_ONCE(get_logger(), "=== GNSS subscriber ==="); + RCLCPP_INFO_ONCE( + get_logger(), + " * First message received. 
GNSS Sender active."); + + mGnssServiceStr = ""; + mGnssService = msg->status.service; + if (mGnssService & sensor_msgs::msg::NavSatStatus::SERVICE_GPS) { + mGnssServiceStr += "GPS "; + } + if (mGnssService & sensor_msgs::msg::NavSatStatus::SERVICE_GLONASS) { + mGnssServiceStr += "GLONASS "; + } + if (mGnssService & sensor_msgs::msg::NavSatStatus::SERVICE_COMPASS) { + mGnssServiceStr += "COMPASS "; + } + if (mGnssService & sensor_msgs::msg::NavSatStatus::SERVICE_GALILEO) { + mGnssServiceStr += "GALILEO"; + } + + RCLCPP_INFO_STREAM_ONCE( + get_logger(), + " * Service: " << mGnssServiceStr.c_str()); + + if (msg->status.status == sensor_msgs::msg::NavSatStatus::STATUS_NO_FIX) { + DEBUG_GNSS("callback_gnssFix: fix not valid"); + if (mGnssFixValid) { + RCLCPP_INFO(get_logger(), "GNSS: signal lost."); + } + mGnssFixValid = false; + + return; + } + + switch (msg->status.status) { + case ::sensor_msgs::msg::NavSatStatus::STATUS_SBAS_FIX: + gnssData.gnss_status = sl::GNSS_STATUS::RTK_FIX; + break; + case ::sensor_msgs::msg::NavSatStatus::STATUS_GBAS_FIX: + gnssData.gnss_status = sl::GNSS_STATUS::RTK_FLOAT; + break; + case ::sensor_msgs::msg::NavSatStatus::STATUS_FIX: + default: + gnssData.gnss_status = sl::GNSS_STATUS::SINGLE; + break; + } + + // ----> Check timestamp + // Note: this is the ROS timestamp, not the GNSS timestamp that is available + // in a + // "sensor_msgs/TimeReference message", e.g. 
`/time_reference` + uint64_t ts_gnss_part_sec = + static_cast(msg->header.stamp.sec) * 1000000000; + uint64_t ts_gnss_part_nsec = + static_cast(msg->header.stamp.nanosec); + uint64_t ts_gnss_nsec = ts_gnss_part_sec + ts_gnss_part_nsec; + + DEBUG_STREAM_GNSS( + "GNSS Ts: " << ts_gnss_part_sec / 1000000000 << " sec + " + << ts_gnss_part_nsec << " nsec = " + << ts_gnss_nsec << " nsec fused"); + + if (ts_gnss_nsec <= mLastTs_gnss_nsec) { + DEBUG_GNSS( + "callback_gnssFix: data not valid (timestamp did not increment)"); + return; + } + + DEBUG_STREAM_GNSS( + "GNSS dT: " << ts_gnss_nsec - mLastTs_gnss_nsec + << " nsec"); + mLastTs_gnss_nsec = ts_gnss_nsec; + // <---- Check timestamp + + mGnssTimestamp = rclcpp::Time(ts_gnss_nsec, RCL_ROS_TIME); + DEBUG_STREAM_GNSS("Stored Ts: " << mGnssTimestamp.nanoseconds()); + + double altit = msg->altitude; + if (mGnssZeroAltitude) { + altit = 0.0; + } + double latit = msg->latitude; + double longit = msg->longitude; + + // std::lock_guard lock(mGnssDataMutex); + gnssData.ts.setNanoseconds(ts_gnss_nsec); + gnssData.setCoordinates(latit, longit, altit, false); + + if (msg->position_covariance_type != + sensor_msgs::msg::NavSatFix::COVARIANCE_TYPE_UNKNOWN) + { + gnssData.latitude_std = msg->position_covariance[0]; + gnssData.longitude_std = msg->position_covariance[1 * 3 + 1]; + gnssData.altitude_std = msg->position_covariance[2 * 3 + 2]; + if (mGnssZeroAltitude) { + gnssData.altitude_std = 1e-9; + } + std::array position_covariance{}; + position_covariance[0] = gnssData.latitude_std * mGnssHcovMul; // X + position_covariance[1 * 3 + 1] = + gnssData.longitude_std * mGnssHcovMul; // Y + position_covariance[2 * 3 + 2] = + gnssData.altitude_std * mGnssVcovMul; // Z + gnssData.position_covariance = position_covariance; + } + + if (!mGnssFixValid) { + DEBUG_GNSS("GNSS: valid fix received."); + DEBUG_STREAM_GNSS( + " * First valid datum - Lat: " + << std::fixed << std::setprecision(9) << latit + << "° - Long: " << longit << "° - Alt: " << 
altit + << " m"); + } + + mGnssFixValid = true; // Used to keep track of signal loss + + if (mZed->isOpened() && mZed->isPositionalTrackingEnabled()) { + auto ingest_error = mFusion.ingestGNSSData(gnssData); + if (ingest_error == sl::FUSION_ERROR_CODE::SUCCESS) { + DEBUG_STREAM_GNSS( + "Datum ingested - [" + << mGnssTimestamp.nanoseconds() << " nsec] " << latit + << "°," << longit << "° / " << altit << " m"); + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "Ingest error occurred when ingesting GNSSData: " + << sl::toString(ingest_error)); + } + mGnssFixNew = true; + } +} + +void ZedCamera::callback_clickedPoint( + const geometry_msgs::msg::PointStamped::SharedPtr msg) +{ + // ----> Check for result subscribers + size_t markerSubCount = 0; + size_t planeSubCount = 0; + try { + if (mPubMarker) {markerSubCount = count_subscribers(mPubMarker->get_topic_name());} + if (mPubPlane) {planeSubCount = count_subscribers(mPubPlane->get_topic_name());} + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_MAP( + "callback_clickedPoint: Exception while counting point plane " + "subscribers"); + return; + } + + if ((markerSubCount + planeSubCount) == 0) { + return; + } + // <---- Check for result subscribers + + rclcpp::Time ts = get_clock()->now(); + + float X = msg->point.x; + float Y = msg->point.y; + float Z = msg->point.z; + + RCLCPP_INFO_STREAM( + get_logger(), "Clicked 3D point [X FW, Y LF, Z UP]: [" + << X << "," << Y << "," << Z << "]"); + + // ----> Transform the point from `map` frame to `left_camera_frame_optical` + double camX, camY, camZ; + try { + // Save the transformation + geometry_msgs::msg::TransformStamped m2o = + mTfBuffer->lookupTransform( + mLeftCamOptFrameId, msg->header.frame_id, + TIMEZERO_SYS, rclcpp::Duration(1, 0)); + + RCLCPP_INFO( + get_logger(), + "'%s' -> '%s': {%.3f,%.3f,%.3f} {%.3f,%.3f,%.3f,%.3f}", + msg->header.frame_id.c_str(), mLeftCamOptFrameId.c_str(), + m2o.transform.translation.x, m2o.transform.translation.y, + 
m2o.transform.translation.z, m2o.transform.rotation.x, + m2o.transform.rotation.y, m2o.transform.rotation.z, + m2o.transform.rotation.w); + + // Get the TF2 transformation + geometry_msgs::msg::PointStamped ptCam; + + tf2::doTransform(*(msg.get()), ptCam, m2o); + + camX = ptCam.point.x; + camY = ptCam.point.y; + camZ = ptCam.point.z; + + RCLCPP_INFO( + get_logger(), + "Point in camera coordinates [Z FW, X RG, Y DW]: {%.3f,%.3f,%.3f}", + camX, camY, camZ); + } catch (tf2::TransformException & ex) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "Transform error: %s", ex.what()); + RCLCPP_WARN_THROTTLE( + get_logger(), steady_clock, 1000.0, + "The tf from '%s' to '%s' is not available.", + msg->header.frame_id.c_str(), + mLeftCamOptFrameId.c_str()); + + return; + } + // <---- Transform the point from `map` frame to `left_camera_frame_optical` + + // ----> Project the point into 2D image coordinates + sl::CalibrationParameters zedParam; + zedParam = mZed->getCameraInformation(mMatResol) + .camera_configuration.calibration_parameters; // ok + + float f_x = zedParam.left_cam.fx; + float f_y = zedParam.left_cam.fy; + float c_x = zedParam.left_cam.cx; + float c_y = zedParam.left_cam.cy; + + float out_scale_factor = static_cast(mMatResol.width) / mCamWidth; + + float u = ((camX / camZ) * f_x + c_x) / out_scale_factor; + float v = ((camY / camZ) * f_y + c_y) / out_scale_factor; + DEBUG_STREAM_MAP( + "Clicked point image coordinates: [" + << u << "," << v + << "] - out_scale_factor: " << out_scale_factor); + // <---- Project the point into 2D image coordinates + + // ----> Extract plane from clicked point + sl::Plane plane; + sl::PlaneDetectionParameters params; + params.max_distance_threshold = mPdMaxDistanceThreshold; + params.normal_similarity_threshold = mPdNormalSimilarityThreshold; + sl::ERROR_CODE err = mZed->findPlaneAtHit(sl::uint2(u, v), plane, params); + if (err != sl::ERROR_CODE::SUCCESS) { + 
RCLCPP_WARN( + get_logger(), + "Error extracting plane at point [%.3f,%.3f,%.3f]: %s", X, Y, + Z, sl::toString(err).c_str()); + return; + } + + sl::float3 center = plane.getCenter(); + sl::float2 dims = plane.getExtents(); + + if (dims[0] == 0 || dims[1] == 0) { + RCLCPP_INFO( + get_logger(), "Plane not found at point [%.3f,%.3f,%.3f]", X, + Y, Z); + return; + } + + DEBUG_MAP( + "Found plane at point [%.3f,%.3f,%.3f] -> Center: [%.3f,%.3f,%.3f], " + "Dims: %.3fx%.3f", + X, Y, Z, center.x, center.y, center.z, dims[0], dims[1]); + // <---- Extract plane from clicked point + + if (markerSubCount > 0) { + // ----> Publish a blue sphere in the clicked point + auto pt_marker = std::make_unique(); + // Set the frame ID and timestamp. See the TF tutorials for information + // on these. + static int hit_pt_id = + 0; // This ID must be unique in the same process. Thus it is good to + // keep it as a static variable + pt_marker->header.stamp = mUsePubTimestamps ? get_clock()->now() : ts; + // Set the marker action. Options are ADD and DELETE + pt_marker->action = visualization_msgs::msg::Marker::ADD; + pt_marker->lifetime = rclcpp::Duration(0, 0); + + // Set the namespace and id for this marker. This serves to create a + // unique ID Any marker sent with the same namespace and id will overwrite + // the old one + pt_marker->ns = "plane_hit_points"; + pt_marker->id = hit_pt_id++; + pt_marker->header.frame_id = mMapFrameId; + + // Set the marker type. + pt_marker->type = visualization_msgs::msg::Marker::SPHERE; + + // Set the pose of the marker. 
+ // This is a full 6DOF pose relative to the frame/time specified in the + // header + pt_marker->pose.position.x = X; + pt_marker->pose.position.y = Y; + pt_marker->pose.position.z = Z; + pt_marker->pose.orientation.x = 0.0; + pt_marker->pose.orientation.y = 0.0; + pt_marker->pose.orientation.z = 0.0; + pt_marker->pose.orientation.w = 1.0; + + // Set the scale of the marker -- 1x1x1 here means 1m on a side + pt_marker->scale.x = 0.025; + pt_marker->scale.y = 0.025; + pt_marker->scale.z = 0.025; + + // Set the color -- be sure to set alpha to something non-zero! + pt_marker->color.r = 0.2f; + pt_marker->color.g = 0.1f; + pt_marker->color.b = 0.75f; + pt_marker->color.a = 0.8; + + // Publish the marker + DEBUG_STREAM_MAP("Publishing PT MARKER message"); + try { + if (mPubMarker) {mPubMarker->publish(std::move(pt_marker));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + // ----> Publish a blue sphere in the clicked point + + // ----> Publish the plane as green mesh + auto plane_marker = std::make_unique(); + // Set the frame ID and timestamp. See the TF tutorials for information + // on these. + static int plane_mesh_id = + 0; // This ID must be unique in the same process. Thus it is good to + // keep it as a static variable + plane_marker->header.stamp = mUsePubTimestamps ? get_clock()->now() : ts; + // Set the marker action. Options are ADD and DELETE + plane_marker->action = visualization_msgs::msg::Marker::ADD; + plane_marker->lifetime = rclcpp::Duration(0, 0); + + // Set the namespace and id for this marker. This serves to create a + // unique ID Any marker sent with the same namespace and id will overwrite + // the old one + plane_marker->ns = "plane_meshes"; + plane_marker->id = plane_mesh_id++; + plane_marker->header.frame_id = mLeftCamFrameId; + + // Set the marker type. 
+ plane_marker->type = visualization_msgs::msg::Marker::TRIANGLE_LIST; + + // Set the pose of the marker. This isplane_marker + plane_marker->pose.orientation.w = 1.0; + + // Set the color -- be sure to set alpha to something non-zero! + plane_marker->color.r = 0.10f; + plane_marker->color.g = 0.75f; + plane_marker->color.b = 0.20f; + plane_marker->color.a = 0.75; + + // Set the scale of the marker -- 1x1x1 here means 1m on a side + plane_marker->scale.x = 1.0; + plane_marker->scale.y = 1.0; + plane_marker->scale.z = 1.0; + + sl::Mesh mesh = plane.extractMesh(); + size_t triangCount = mesh.getNumberOfTriangles(); + size_t ptCount = triangCount * 3; + plane_marker->points.resize(ptCount); + plane_marker->colors.resize(ptCount); + + size_t ptIdx = 0; + for (size_t t = 0; t < triangCount; t++) { + for (int p = 0; p < 3; p++) { + uint vIdx = mesh.triangles[t][p]; + plane_marker->points[ptIdx].x = mesh.vertices[vIdx][0]; + plane_marker->points[ptIdx].y = mesh.vertices[vIdx][1]; + plane_marker->points[ptIdx].z = mesh.vertices[vIdx][2]; + + // Set the color -- be sure to set alpha to something non-zero! + plane_marker->colors[ptIdx].r = 0.10f; + plane_marker->colors[ptIdx].g = 0.75f; + plane_marker->colors[ptIdx].b = 0.20f; + plane_marker->colors[ptIdx].a = 0.75; + + ptIdx++; + } + } + + // Publish the marker + DEBUG_STREAM_MAP("Publishing PLANE MARKER message"); + try { + if (mPubMarker) {mPubMarker->publish(std::move(plane_marker));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + // <---- Publish the plane as green mesh + } + + if (planeSubCount > 0) { + // ----> Publish the plane as custom message + + auto planeMsg = std::make_unique(); + planeMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : ts; + planeMsg->header.frame_id = mLeftCamFrameId; + + // Plane equation + sl::float4 sl_coeff = plane.getPlaneEquation(); + planeMsg->coefficients.coef[0] = static_cast(sl_coeff[0]); + planeMsg->coefficients.coef[1] = static_cast(sl_coeff[1]); + planeMsg->coefficients.coef[2] = static_cast(sl_coeff[2]); + planeMsg->coefficients.coef[3] = static_cast(sl_coeff[3]); + + // Plane Normal + sl::float3 sl_normal = plane.getNormal(); + planeMsg->normal.x = sl_normal[0]; + planeMsg->normal.y = sl_normal[1]; + planeMsg->normal.z = sl_normal[2]; + + // Plane Center + sl::float3 sl_center = plane.getCenter(); + planeMsg->center.x = sl_center[0]; + planeMsg->center.y = sl_center[1]; + planeMsg->center.z = sl_center[2]; + + // Plane extents + sl::float3 sl_extents = plane.getExtents(); + planeMsg->extents[0] = sl_extents[0]; + planeMsg->extents[1] = sl_extents[1]; + + // Plane pose + sl::Pose sl_pose = plane.getPose(); + sl::Orientation sl_rot = sl_pose.getOrientation(); + sl::Translation sl_tr = sl_pose.getTranslation(); + + planeMsg->pose.rotation.x = sl_rot.ox; + planeMsg->pose.rotation.y = sl_rot.oy; + planeMsg->pose.rotation.z = sl_rot.oz; + planeMsg->pose.rotation.w = sl_rot.ow; + + planeMsg->pose.translation.x = sl_tr.x; + planeMsg->pose.translation.y = sl_tr.y; + planeMsg->pose.translation.z = sl_tr.z; + + // Plane Bounds + std::vector sl_bounds = plane.getBounds(); + planeMsg->bounds.points.resize(sl_bounds.size()); + memcpy( + planeMsg->bounds.points.data(), sl_bounds.data(), + 3 * sl_bounds.size() * sizeof(float)); + + // Plane mesh + sl::Mesh sl_mesh = plane.extractMesh(); + size_t triangCount = sl_mesh.triangles.size(); + size_t ptsCount = sl_mesh.vertices.size(); + planeMsg->mesh.triangles.resize(triangCount); + planeMsg->mesh.vertices.resize(ptsCount); + + // memcpy not allowed because data types are different + for (size_t i = 0; i < triangCount; i++) { + planeMsg->mesh.triangles[i].vertex_indices[0] = sl_mesh.triangles[i][0]; + 
planeMsg->mesh.triangles[i].vertex_indices[1] = sl_mesh.triangles[i][1]; + planeMsg->mesh.triangles[i].vertex_indices[2] = sl_mesh.triangles[i][2]; + } + + // memcpy not allowed because data types are different + for (size_t i = 0; i < ptsCount; i++) { + planeMsg->mesh.vertices[i].x = sl_mesh.vertices[i][0]; + planeMsg->mesh.vertices[i].y = sl_mesh.vertices[i][1]; + planeMsg->mesh.vertices[i].z = sl_mesh.vertices[i][2]; + } + + DEBUG_STREAM_MAP("Publishing PLANE message"); + try { + if (mPubPlane) {mPubPlane->publish(std::move(planeMsg));} + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + // <---- Publish the plane as custom message + } +} + +void ZedCamera::callback_setRoi( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Set ROI service called **"); + + if (mDepthDisabled) { + std::string err_msg = + "Error while setting ZED SDK region of interest: depth processing is " + "disabled!"; + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + res->message = err_msg; + res->success = false; + return; + } + + RCLCPP_INFO_STREAM(get_logger(), " * ROI string: " << req->roi.c_str()); + + if (req->roi == "") { + std::string err_msg = + "Error while setting ZED SDK region of interest: a vector of " + "normalized points describing a " + "polygon is required. e.g. 
" + "'[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]'"; + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + res->message = err_msg; + res->success = false; + return; + } + + std::string error; + std::vector> parsed_poly = + sl_tools::parseStringMultiVector_float(req->roi, error); + + if (error != "") { + std::string err_msg = "Error while setting ZED SDK region of interest: "; + err_msg += error; + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + res->message = err_msg; + res->success = false; + return; + } + + // ----> Set Region of Interest + // Create mask + RCLCPP_INFO(get_logger(), " * Setting ROI"); + std::vector sl_poly; + parseRoiPoly(parsed_poly, sl_poly); + + sl::Resolution resol(mCamWidth, mCamHeight); + sl::Mat roi_mask(resol, sl::MAT_TYPE::U8_C1, sl::MEM::CPU); + if (!sl_tools::generateROI(sl_poly, roi_mask)) { + std::string err_msg = + "Error generating the region of interest image mask. "; + err_msg += error; + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + res->message = err_msg; + res->success = false; + return; + } else { + sl::ERROR_CODE err = mZed->setRegionOfInterest(roi_mask); + if (err != sl::ERROR_CODE::SUCCESS) { + std::string err_msg = + "Error while setting ZED SDK region of interest: "; + err_msg += sl::toString(err).c_str(); + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + mManualRoiEnabled = false; + + + if (_nitrosDisabled) { + if (mPubRoiMask.getTopic().empty()) { + mPubRoiMask = image_transport::create_publisher( + this, mRoiMaskTopic, mQos.get_rmw_qos_profile()); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mPubRoiMask.getTopic()); + } + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + if (!mNitrosPubRoiMask) { + mNitrosPubRoiMask = std::make_shared< + nvidia::isaac_ros::nitros::ManagedNitrosPublisher< + nvidia::isaac_ros::nitros::NitrosImage>>( + this, mRoiMaskTopic, + nvidia::isaac_ros::nitros::nitros_image_bgra8_t:: + supported_type_name, + 
nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig(), mQos); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mRoiMaskTopic); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << mRoiMaskTopic + "/nitros"); + } +#endif + } + + res->message = err_msg; + res->success = false; + return; + } else { + RCLCPP_INFO(get_logger(), " * Region of Interest correctly set."); + + mManualRoiEnabled = true; + + res->message = "Region of Interest correctly set."; + res->success = true; + return; + } + } + // <---- Set Region of Interest +} + +void ZedCamera::callback_resetRoi( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + RCLCPP_INFO(get_logger(), "** Reset ROI service called **"); + + if (mDepthDisabled) { + std::string err_msg = + "Error while resetting ZED SDK region of interest: depth processing " + "is " + "disabled!"; + + RCLCPP_WARN_STREAM(get_logger(), " * " << err_msg); + + res->message = err_msg; + res->success = false; + return; + } + + sl::Mat empty_roi; + sl::ERROR_CODE err = mZed->setRegionOfInterest(empty_roi); + + if (err != sl::ERROR_CODE::SUCCESS) { + std::string err_msg = + " * Error while resetting ZED SDK region of interest: "; + err_msg += sl::toString(err); + + RCLCPP_WARN_STREAM( + get_logger(), + " * Error while resetting ZED SDK region of interest: " << err_msg); + + mManualRoiEnabled = false; + + res->message = err_msg; + res->success = false; + } else { + RCLCPP_INFO(get_logger(), " * Region of Interest correctly reset."); + + mManualRoiEnabled = false; + + res->message = "Region of Interest correctly reset."; + res->success = true; + return; + } +} + +void ZedCamera::callback_toLL( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + RCLCPP_INFO(get_logger(), "** Map to Lat/Long service called **"); + + if (!mGnssFusionEnabled) { + RCLCPP_WARN(get_logger(), " * GNSS fusion is not enabled"); + return; + } + + if 
(mFusedPosTrackingStatus.gnss_fusion_status != sl::GNSS_FUSION_STATUS::OK) { + RCLCPP_WARN(get_logger(), " * GNSS fusion is not yet ready"); + return; + } + + sl::Translation map_pt; + map_pt.x = req->map_point.x; + map_pt.y = req->map_point.y; + map_pt.z = req->map_point.z; + + sl::GeoPose geo_pose; + sl::Pose map_pose; + map_pose.pose_data.setIdentity(); + map_pose.pose_data.setTranslation(map_pt); + mFusion.Camera2Geo(map_pose, geo_pose); + + res->ll_point.altitude = geo_pose.latlng_coordinates.getAltitude(); + res->ll_point.latitude = geo_pose.latlng_coordinates.getLatitude(false); + res->ll_point.longitude = geo_pose.latlng_coordinates.getLongitude(false); + + RCLCPP_INFO( + get_logger(), + "* Converted the MAP point (%.2fm,%.2fm,%.2fm)to GeoPoint " + "%.6f°,%.6f° / %.2f m", + req->map_point.x, req->map_point.y, req->map_point.z, + geo_pose.latlng_coordinates.getLatitude(false), + geo_pose.latlng_coordinates.getLongitude(false), + geo_pose.latlng_coordinates.getAltitude()); +} + +void ZedCamera::callback_fromLL( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + RCLCPP_INFO(get_logger(), "** Lat/Long to Map service called **"); + + if (!mGnssFusionEnabled) { + RCLCPP_WARN(get_logger(), " * GNSS fusion is not enabled"); + return; + } + + if (mFusedPosTrackingStatus.gnss_fusion_status != + sl::GNSS_FUSION_STATUS::OK) + { + RCLCPP_WARN(get_logger(), " * GNSS fusion is not ready"); + return; + } + + sl::LatLng ll_pt; + ll_pt.setCoordinates( + req->ll_point.latitude, req->ll_point.longitude, + req->ll_point.altitude, false); + + sl::Pose sl_pt_cam; + mFusion.Geo2Camera(ll_pt, sl_pt_cam); + + // the point is already in the MAP Frame as it is converted from a GeoPoint + res->map_point.x = sl_pt_cam.getTranslation().x; + res->map_point.y = sl_pt_cam.getTranslation().y; + res->map_point.z = sl_pt_cam.getTranslation().z; + + RCLCPP_INFO( + get_logger(), + "* Converted the GeoPoint %.6f°,%.6f° / %.2f m to MAP point " + 
"(%.2fm,%.2fm,%.2fm)", + ll_pt.getLatitude(false), ll_pt.getLongitude(false), + ll_pt.getAltitude(), res->map_point.x, res->map_point.y, + res->map_point.z); +} + +void ZedCamera::callback_clock( + const rosgraph_msgs::msg::Clock::SharedPtr msg) +{ + DEBUG_SIM("=== CLOCK CALLBACK ==="); + rclcpp::Time msg_time(msg->clock, RCL_ROS_TIME); + + try { + if (msg_time != mLastClock) { + mClockAvailable = true; + DEBUG_SIM("Received an updated '/clock' message."); + } else { + mClockAvailable = false; + DEBUG_SIM("Received a NOT updated '/clock' message."); + } + mLastClock = msg_time; + } catch (...) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error comparing clock messages: " + << static_cast(msg_time.get_clock_type()) + << " vs " + << static_cast(mLastClock.get_clock_type())); + + mClockAvailable = false; + } +} + +void ZedCamera::processRtRoi(rclcpp::Time ts) +{ + if (!mAutoRoiEnabled && !mManualRoiEnabled) { + mAutoRoiStatus = sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE::NOT_ENABLED; + return; + } + + if (mAutoRoiEnabled) { + auto prevStatus = mAutoRoiStatus; + mAutoRoiStatus = mZed->getRegionOfInterestAutoDetectionStatus(); + DEBUG_STREAM_ROI("Automatic ROI Status:" << sl::toString(mAutoRoiStatus)); + if (prevStatus == + sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE::RUNNING && + mAutoRoiStatus == + sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE::READY) + { + RCLCPP_INFO( + get_logger(), + "Region of interest auto detection is done!"); + } + } + + if (mAutoRoiStatus == sl::REGION_OF_INTEREST_AUTO_DETECTION_STATE::READY || + mManualRoiEnabled) + { + uint8_t subCount = 0; + + try { + if (_nitrosDisabled) { + subCount = mPubRoiMask.getNumSubscribers(); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + subCount = count_subscribers(mRoiMaskTopic); +#endif + } + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("processRtRoi: Exception while counting subscribers"); + return; + } + + if (subCount > 0) { + DEBUG_ROI("Retrieve ROI Mask"); + sl::Mat roi_mask; + mZed->getRegionOfInterest(roi_mask); + + DEBUG_ROI("Publish ROI Mask"); + + if (_nitrosDisabled) { + publishImageWithInfo( + roi_mask, mPubRoiMask, mPubRoiMaskCamInfo, mPubRoiMaskCamInfoTrans, mLeftCamInfoMsg, + mLeftCamOptFrameId, ts); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + roi_mask, mNitrosPubRoiMask, mPubRoiMaskCamInfo, mPubRoiMaskCamInfoTrans, mLeftCamInfoMsg, + mLeftCamOptFrameId, ts); +#endif + } + } + } +} + +bool ZedCamera::startStreamingServer() +{ + DEBUG_STR("Starting streaming server"); + + if (mZed->isStreamingEnabled()) { + mZed->disableStreaming(); + RCLCPP_WARN(get_logger(), "A streaming server was already running and has been stopped"); + } + + sl::StreamingParameters params; + params.adaptative_bitrate = mStreamingServerAdaptiveBitrate; + params.bitrate = mStreamingServerBitrate; + params.chunk_size = mStreamingServerChunckSize; + params.codec = mStreamingServerCodec; + params.gop_size = mStreamingServerGopSize; + params.port = mStreamingServerPort; + params.target_framerate = mStreamingServerTargetFramerate; + + sl::ERROR_CODE res; + res = mZed->enableStreaming(params); + if (res != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error starting the Streaming server: " << sl::toString( + res) << " - " << sl::toVerbose(res)); + mStreamingServerRunning = false; + } else { + mStreamingServerRunning = true; + RCLCPP_INFO(get_logger(), "Streaming server started"); + } + return mStreamingServerRunning; +} + +void ZedCamera::stopStreamingServer() +{ + if (mZed->isStreamingEnabled()) { + mZed->disableStreaming(); + RCLCPP_INFO(get_logger(), "Streaming server stopped"); + } else { + RCLCPP_WARN(get_logger(), "A streaming server was not enabled."); + } + + mStreamingServerRunning = false; + mStreamingServerRequired = 
false; +} + +void ZedCamera::publishHealthStatus() +{ + if (!mPubHealthStatus) { + return; + } + + if (mImageValidityCheck <= 0 || !mPublishStatus) { + return; + } + + size_t sub_count = 0; + try { + sub_count = mPubHealthStatus->get_subscription_count(); + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_COMM("publishHealthStatus: Exception while counting subscribers"); + return; + } + + if (sub_count == 0) { + return; + } + + sl::HealthStatus status = mZed->getHealthStatus(); + auto msg = std::make_unique(); + msg->header.stamp = mUsePubTimestamps ? + get_clock()->now() : + mFrameTimestamp; + msg->header.frame_id = mBaseFrameId; + msg->serial_number = mCamSerialNumber; + msg->camera_name = mCameraName; + msg->low_image_quality = status.low_image_quality; + msg->low_lighting = status.low_lighting; + msg->low_depth_reliability = status.low_depth_reliability; + msg->low_motion_sensors_reliability = + status.low_motion_sensors_reliability; + + mPubHealthStatus->publish(std::move(msg)); +} + +bool ZedCamera::publishSvoStatus(uint64_t frame_ts) +{ + if (!mSvoMode) { + return false; + } + + if (!mPubSvoStatus) { + return false; + } + + size_t subCount = 0; + try { + subCount = mPubSvoStatus->get_subscription_count(); + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("publishSvoStatus: Exception while counting subscribers"); + return false; + } + + if (subCount > 0) { + auto msg = std::make_unique(); + + // ----> Fill the status message + msg->file_name = mSvoFilepath; + msg->frame_id = mZed->getSVOPosition(); + msg->total_frames = mZed->getSVONumberOfFrames(); + msg->frame_ts = frame_ts; + + if (mSvoPause) { + msg->status = zed_msgs::msg::SvoStatus::STATUS_PAUSED; + } else if (mGrabStatus == sl::ERROR_CODE::END_OF_SVOFILE_REACHED) { + msg->frame_id = msg->total_frames - 1; + msg->status = zed_msgs::msg::SvoStatus::STATUS_END; + } else { + msg->status = zed_msgs::msg::SvoStatus::STATUS_PLAYING; + } + + msg->loop_active = mSvoLoop; + msg->loop_count = mSvoLoopCount; + msg->real_time_mode = mSvoRealtime; + // <---- Fill the status message + + // Publish the message + if (mPubSvoStatus) {mPubSvoStatus->publish(std::move(msg));} + return true; + } + return false; +} + +void ZedCamera::callback_pubHeartbeat() +{ + if (!mPubHeartbeatStatus) { + return; + } + + if (!mPublishStatus) { + return; + } + + if (mThreadStop) { + return; + } + + // ----> Count the subscribers + size_t sub_count = 0; + try { + sub_count = mPubHeartbeatStatus->get_subscription_count(); + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("publishHeartbeat: Exception while counting subscribers"); + return; + } + + if (sub_count == 0) { + return; + } + // <---- Count the subscribers + + // ----> Fill the message + auto msg = std::make_unique(); + msg->beat_count = ++mHeartbeatCount; + msg->camera_sn = mCamSerialNumber; + msg->full_name = this->get_fully_qualified_name(); + msg->node_name = this->get_name(); + msg->node_ns = this->get_namespace(); + msg->simul_mode = mSimMode; + msg->svo_mode = mSvoMode; + // <---- Fill the message + + // Publish the heartbeat + if (mPubHeartbeatStatus) {mPubHeartbeatStatus->publish(std::move(msg));} +} + +void ZedCamera::publishClock(const sl::Timestamp & ts) +{ + DEBUG_COMM("Publishing clock"); + + if (!mPubClock) { + return; + } + + size_t subCount = 0; + try { + subCount = mPubClock->get_subscription_count(); + } catch (...) { + rcutils_reset_error(); + DEBUG_COMM("publishClock: Exception while counting subscribers"); + return; + } + + if (subCount == 0) { + return; + } + + auto msg = std::make_unique(); + msg->clock = sl_tools::slTime2Ros(ts); + + if (mPubClock) {mPubClock->publish(std::move(msg));} +} + +} // namespace stereolabs + +#include "rclcpp_components/register_node_macro.hpp" + +// Register the component with class_loader. +// This acts as a sort of entry point, allowing the component to be discoverable +// when its library is being loaded into a running process. 
+RCLCPP_COMPONENTS_REGISTER_NODE(stereolabs::ZedCamera) diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_objdet.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_objdet.cpp new file mode 100644 index 000000000..46b6e6b02 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_objdet.cpp @@ -0,0 +1,1211 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "zed_camera_component.hpp" +#include "sl_logging.hpp" +#include "sl_tools.hpp" + +namespace stereolabs +{ + +void ZedCamera::getOdParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== OBJECT DETECTION parameters ==="); + if (sl_tools::isZED(mCamUserModel)) { + RCLCPP_WARN(get_logger(), "!!! OD parameters are not used with ZED!!!"); + return; + } + + sl_tools::getParam( + shared_from_this(), "object_detection.od_enabled", + mObjDetEnabled, mObjDetEnabled, + " * Object Det. enabled: "); + + sl_tools::getParam( + shared_from_this(), + "object_detection.allow_reduced_precision_inference", + mObjDetReducedPrecision, mObjDetReducedPrecision); + RCLCPP_INFO_STREAM( + get_logger(), + " * Object Det. allow reduced precision: " + << (mObjDetReducedPrecision ? 
"TRUE" : "FALSE")); + sl_tools::getParam( + shared_from_this(), "object_detection.max_range", + mObjDetMaxRange, mObjDetMaxRange, + " * Object Det. maximum range [m]: ", false, 0.1, 40.0); + sl_tools::getParam( + shared_from_this(), "object_detection.prediction_timeout", + mObjDetPredTimeout, mObjDetPredTimeout, + " * Object Det. prediction timeout [sec]: ", false, 0.0, 300.0); + sl_tools::getParam( + shared_from_this(), "object_detection.enable_tracking", + mObjDetTracking, mObjDetTracking); + RCLCPP_INFO_STREAM( + get_logger(), " * Object Det. tracking: " + << (mObjDetTracking ? "TRUE" : "FALSE")); + + sl_tools::getEnumParam( + shared_from_this(), "object_detection.filtering_mode", "NONE", + sl::OBJECT_FILTERING_MODE::NONE, + sl::OBJECT_FILTERING_MODE::LAST, mObjFilterMode, + " * Object Filtering mode: "); + + // ----> Object Detection model + if (!sl_tools::getEnumParam( + shared_from_this(), "object_detection.detection_model", + "MULTI_CLASS_BOX_FAST", + sl::OBJECT_DETECTION_MODEL::MULTI_CLASS_BOX_FAST, + sl::OBJECT_DETECTION_MODEL::LAST, mObjDetModel)) + { + RCLCPP_ERROR(get_logger(), "Stopping the node."); + exit(EXIT_FAILURE); + } + if (mObjDetModel == sl::OBJECT_DETECTION_MODEL::CUSTOM_BOX_OBJECTS) { + RCLCPP_ERROR_STREAM( + get_logger(), + "The value of the parameter 'object_detection.detection_model' is not supported: '" + << sl::toString(mObjDetModel).c_str() << "'. Stopping the node"); + exit(EXIT_FAILURE); + } + RCLCPP_INFO_STREAM( + get_logger(), " * Object Det. 
model: " + << sl::toString(mObjDetModel).c_str()); + + if (mObjDetModel == sl::OBJECT_DETECTION_MODEL::CUSTOM_YOLOLIKE_BOX_OBJECTS) { + mUsingCustomOd = true; + } else { + mUsingCustomOd = false; + } + // <---- Object Detection model + + if (mUsingCustomOd) { + getCustomOdParams(); + } else { + // ----> MultiClassBox parameters + sl_tools::getParam( + shared_from_this(), + "object_detection.class.people.enabled", + mObjDetPeopleEnable, mObjDetPeopleEnable, + " * MultiClassBox people: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.vehicle.enabled", + mObjDetVehiclesEnable, mObjDetVehiclesEnable, + " * MultiClassBox vehicles: ", true); + sl_tools::getParam( + shared_from_this(), "object_detection.class.bag.enabled", + mObjDetBagsEnable, mObjDetBagsEnable, + " * MultiClassBox bags: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.animal.enabled", + mObjDetAnimalsEnable, mObjDetAnimalsEnable, + " * MultiClassBox animals: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.electronics.enabled", + mObjDetElectronicsEnable, mObjDetElectronicsEnable, + " * MultiClassBox electronics: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.fruit_vegetable.enabled", + mObjDetFruitsEnable, mObjDetFruitsEnable, + " * MultiClassBox fruits and vegetables: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.sport.enabled", + mObjDetSportEnable, mObjDetSportEnable, + " * MultiClassBox sport-related objects: ", true); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.people.confidence_threshold", + mObjDetPeopleConf, mObjDetPeopleConf, + " * MultiClassBox people confidence: ", true, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.vehicle.confidence_threshold", + mObjDetVehiclesConf, mObjDetVehiclesConf, + " * MultiClassBox vehicles confidence: ", true, 0.0, 100.0); + sl_tools::getParam( 
+ shared_from_this(), + "object_detection.class.bag.confidence_threshold", + mObjDetBagsConf, mObjDetBagsConf, + " * MultiClassBox bags confidence: ", true, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.animal.confidence_threshold", + mObjDetAnimalsConf, mObjDetAnimalsConf, + " * MultiClassBox animals confidence: ", true, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.electronics.confidence_threshold", + mObjDetElectronicsConf, mObjDetElectronicsConf, + " * MultiClassBox electronics confidence: ", true, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.fruit_vegetable.confidence_threshold", + mObjDetFruitsConf, mObjDetFruitsConf, + " * MultiClassBox fruits and vegetables confidence: ", true, 0.0, 100.0); + sl_tools::getParam( + shared_from_this(), + "object_detection.class.sport.confidence_threshold", + mObjDetSportConf, mObjDetSportConf, + " * MultiClassBox sport-related objects confidence: ", true, 0.0, 100.0); + // <---- MultiClassBox parameters + } +} + +void ZedCamera::getCustomOdParams() +{ + sl_tools::getParam( + shared_from_this(), "object_detection.custom_onnx_file", + mYoloOnnxPath, mYoloOnnxPath, " * Custom ONNX file: "); + if (mYoloOnnxPath.empty()) { + RCLCPP_ERROR_STREAM( + get_logger(), + "The parameter 'object_detection.custom_onnx_file' is empty. 
" + "Please check the value in the YAML file."); + exit(EXIT_FAILURE); + } + sl_tools::getParam( + shared_from_this(), + "object_detection.custom_onnx_input_size", mYoloOnnxSize, + mYoloOnnxSize, " * Custom ONNX input size: ", false, 128, 2048); + sl_tools::getParam( + shared_from_this(), + "object_detection.custom_class_count", mCustomClassCount, + mCustomClassCount, " * Custom ONNX class count: ", false, 1, 999); + + for (int i = 0; i < mCustomClassCount; i++) { + std::string param_name; + std::string label = ""; + int class_id = i; + + std::stringstream class_prefix; + class_prefix << "class_"; + class_prefix.width(3); + class_prefix.fill('0'); + class_prefix << i; + + std::string param_prefix = std::string("object_detection.") + class_prefix.str() + "."; + + param_name = param_prefix + "label"; + sl_tools::getParam( + shared_from_this(), param_name, label, label, std::string( + " * ") + param_name + ": ", false); + + param_name = param_prefix + "model_class_id"; + sl_tools::getParam( + shared_from_this(), param_name, class_id, class_id, std::string( + " * ") + param_name + ": ", false, 0, 999); + + mCustomClassIdMap[class_prefix.str()] = class_id; // Update the class prefix to class_id mapping + DEBUG_STREAM_OD(" Mapped '" << class_prefix.str() << "' to ID '" << class_id << "'"); + mCustomLabels[class_id] = label; // Update the class id to label mapping + DEBUG_STREAM_OD(" Mapped ID '" << class_id << "' to '" << label << "'"); + + sl::CustomObjectDetectionProperties customOdProperties; + + param_name = param_prefix + "enabled"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.enabled, customOdProperties.enabled, std::string( + " * ") + param_name + ": ", true); + + param_name = param_prefix + "confidence_threshold"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.detection_confidence_threshold, customOdProperties.detection_confidence_threshold, std::string( + " * ") + param_name + ": ", true, 0.0f, 
100.0f); + param_name = param_prefix + "is_grounded"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.is_grounded, customOdProperties.is_grounded, std::string( + " * ") + param_name + ": ", true); + param_name = param_prefix + "is_static"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.is_static, customOdProperties.is_static, std::string( + " * ") + param_name + ": ", true); + param_name = param_prefix + "tracking_timeout"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.tracking_timeout, customOdProperties.tracking_timeout, std::string( + " * ") + param_name + ": ", true, -1.0f, + 300.0f); + param_name = param_prefix + "tracking_max_dist"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.tracking_max_dist, customOdProperties.tracking_max_dist, std::string( + " * ") + param_name + ": ", true, -1.0f, + 100.0f); + param_name = param_prefix + "max_box_width_normalized"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.max_box_width_normalized, customOdProperties.max_box_width_normalized, std::string( + " * ") + param_name + ": ", true, -1.0f, + 1.0f); + param_name = param_prefix + "min_box_width_normalized"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.min_box_width_normalized, customOdProperties.min_box_width_normalized, std::string( + " * ") + param_name + ": ", true, -1.0f, + 1.0f); + param_name = param_prefix + "max_box_height_normalized"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.max_box_height_normalized, customOdProperties.max_box_height_normalized, std::string( + " * ") + param_name + ": ", true, -1.0f, + 1.0f); + param_name = param_prefix + "min_box_height_normalized"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.min_box_height_normalized, customOdProperties.min_box_height_normalized, std::string( + " * ") + param_name + 
": ", true, -1.0f, + 1.0f); + param_name = param_prefix + "max_box_width_meters"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.max_box_width_meters, customOdProperties.max_box_width_meters, + std::string(" * ") + param_name + ": ", true, -1.0f, + 10000.0f); + param_name = param_prefix + "min_box_width_meters"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.min_box_width_meters, customOdProperties.min_box_width_meters, + std::string(" * ") + param_name + ": ", true, -1.0f, + 10000.0f); + param_name = param_prefix + "max_box_height_meters"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.max_box_height_meters, customOdProperties.max_box_height_meters, std::string( + " * ") + param_name + ": ", true, -1.0f, + 10000.0f); + param_name = param_prefix + "max_allowed_acceleration"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.max_allowed_acceleration, customOdProperties.max_allowed_acceleration, std::string( + " * ") + param_name + ": ", true, 0.0f, + 100000.0f); + param_name = param_prefix + "velocity_smoothing_factor"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.object_tracking_parameters.velocity_smoothing_factor, customOdProperties.object_tracking_parameters.velocity_smoothing_factor, std::string( + " * ") + param_name + ": ", true, 0.0f, + 1.0f); + param_name = param_prefix + "min_velocity_threshold"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.object_tracking_parameters.min_velocity_threshold, customOdProperties.object_tracking_parameters.min_velocity_threshold, std::string( + " * ") + param_name + ": ", true, 0.0f, + 100.0f); + param_name = param_prefix + "prediction_timeout_s"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.object_tracking_parameters.prediction_timeout_s, customOdProperties.object_tracking_parameters.prediction_timeout_s, 
std::string( + " * ") + param_name + ": ", true, 0.0f, + 100.0f); + param_name = param_prefix + "min_confirmation_time_s"; + sl_tools::getParam( + shared_from_this(), param_name, + customOdProperties.object_tracking_parameters.min_confirmation_time_s, customOdProperties.object_tracking_parameters.min_confirmation_time_s, std::string( + " * ") + param_name + ": ", true, 0.0f, + 100.0f); + + param_name = param_prefix + "object_acceleration_preset"; + sl_tools::getEnumParam( + shared_from_this(), param_name, "DEFAULT", + sl::OBJECT_ACCELERATION_PRESET::DEFAULT, + sl::OBJECT_ACCELERATION_PRESET::LAST, + customOdProperties.object_tracking_parameters.object_acceleration_preset, + std::string(" * ") + param_name + ": "); + + mCustomOdProperties[class_id] = customOdProperties; // Update the Custom OD Properties information + } + +} + +bool ZedCamera::handleOdDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + DEBUG_OD("handleOdDynamicParams"); + + if (param.get_name() == "object_detection.class.people.enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetPeopleEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetPeopleEnable ? 
"TRUE" : "FALSE")); + } else if (param.get_name() == "object_detection.class.vehicle.enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetVehiclesEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetVehiclesEnable ? "TRUE" : "FALSE")); + } else if (param.get_name() == "object_detection.class.bag.enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetBagsEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetBagsEnable ? "TRUE" : "FALSE")); + } else if (param.get_name() == "object_detection.class.animal.enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetAnimalsEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetAnimalsEnable ? 
"TRUE" : "FALSE")); + } else if (param.get_name() == + "object_detection.class.electronics.enabled") + { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetElectronicsEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetElectronicsEnable ? "TRUE" : "FALSE")); + } else if (param.get_name() == + "object_detection.class.fruit_vegetable.enabled") + { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetFruitsEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetFruitsEnable ? "TRUE" : "FALSE")); + } else if (param.get_name() == "object_detection.class.sport.enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mObjDetSportEnable = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mObjDetSportEnable ? 
"TRUE" : "FALSE")); + } else if (param.get_name() == "object_detection.class.people.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetPeopleConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetPeopleConf); + } else if (param.get_name() == "object_detection.class.vehicle.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetVehiclesConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetVehiclesConf); + } else if (param.get_name() == "object_detection.class.bag.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetBagsConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetBagsConf); + } else if (param.get_name() == "object_detection.class.animal.confidence_threshold") { 
+ rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetAnimalsConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetAnimalsConf); + } else if (param.get_name() == "object_detection.class.electronics.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + if (!sl_tools::checkParamRange( + param, mObjDetElectronicsConf, 0.0, 100.0, result, + get_logger())) + { + return false; + } + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetElectronicsConf); + } else if (param.get_name() == "object_detection.class.fruit_vegetable.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetFruitsConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetFruitsConf); + } else if (param.get_name() == "object_detection.class.sport.confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; 
+ if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange(param, mObjDetSportConf, 0.0, 100.0, result, get_logger())) { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mObjDetSportConf); + } + + mObjDetRtParamsDirty = true; + return true; +} + +bool ZedCamera::handleCustomOdDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + DEBUG_COMM("handleCustomOdDynamicParams"); + + std::string param_full_name = param.get_name(); + DEBUG_STREAM_COMM("handleCustomOdDynamicParams: Parameter name: " << param_full_name); + + if (param_full_name.find("object_detection.class_") == std::string::npos) { + DEBUG_STREAM_COMM( + "handleCustomOdDynamicParams: Parameter '" << param_full_name << + "' is not a custom object detection parameter"); + return true; + } + + // Get the class ID from the parameter name + std::string class_id_str = param_full_name.substr(param_full_name.find("class_"), 9); + DEBUG_STREAM_COMM("handleCustomOdDynamicParams: Class ID: " << class_id_str); + + auto it = mCustomClassIdMap.find(class_id_str); + if (it == mCustomClassIdMap.end()) { + DEBUG_STREAM_COMM( + "handleCustomOdDynamicParams: Class ID '" + << class_id_str << "' not found"); + return false; + } + int class_id = it->second; + DEBUG_STREAM_COMM("handleCustomOdDynamicParams: Class ID: " << class_id); + + std::string param_name = param_full_name.substr(param_full_name.find_last_of('.') + 1); + DEBUG_STREAM_COMM("handleCustomOdDynamicParams: Custom OD Parameter name: " << param_name); + + if (param_name == "enabled") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = 
+ param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].enabled = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mCustomOdProperties[class_id].enabled ? "TRUE" : "FALSE")); + } else if (param_name == "confidence_threshold") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + if (!sl_tools::checkParamRange( + param, mCustomOdProperties[class_id].detection_confidence_threshold, + 0.0f, 100.0f, result, get_logger())) + { + return false; + } + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].detection_confidence_threshold); + } else if (param_name == "is_grounded") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].is_grounded = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mCustomOdProperties[class_id].is_grounded ? 
"TRUE" : "FALSE")); + } else if (param_name == "is_static") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].is_static = param.as_bool(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << (mCustomOdProperties[class_id].is_static ? "TRUE" : "FALSE")); + } else if (param_name == "tracking_timeout") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].tracking_timeout = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].tracking_timeout); + } else if (param_name == "tracking_max_dist") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].tracking_max_dist = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].tracking_max_dist); + } else if (param_name == "max_box_width_normalized") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " 
+ rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].max_box_width_normalized = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].max_box_width_normalized); + } else if (param_name == "min_box_width_normalized") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].min_box_width_normalized = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].min_box_width_normalized); + } else if (param_name == "max_box_height_normalized") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].max_box_height_normalized = param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].max_box_height_normalized); + } else if (param_name == "min_box_height_normalized") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].min_box_height_normalized = param.as_double(); + 
DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].min_box_height_normalized); + } else if (param_name == "max_box_width_meters") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].max_box_width_meters = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].max_box_width_meters); + } else if (param_name == "min_box_width_meters") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + + mCustomOdProperties[class_id].min_box_width_meters = param.as_double(); + + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].min_box_width_meters); + } else if (param_name == "max_box_height_meters") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].max_box_height_meters = param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].max_box_height_meters); + } else if (param_name == "max_allowed_acceleration") { + rclcpp::ParameterType + 
correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].max_allowed_acceleration = param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].max_allowed_acceleration); + } else if (param_name == "velocity_smoothing_factor") { + rclcpp::ParameterType + correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].object_tracking_parameters.velocity_smoothing_factor = + param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].object_tracking_parameters.velocity_smoothing_factor); + } else if (param_name == "min_velocity_threshold") { + rclcpp::ParameterType + correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].object_tracking_parameters.min_velocity_threshold = + param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].object_tracking_parameters.min_velocity_threshold); + } else if (param_name == "prediction_timeout_s") { + rclcpp::ParameterType + correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + 
result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].object_tracking_parameters.prediction_timeout_s = + param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].object_tracking_parameters.prediction_timeout_s); + } else if (param_name == "min_confirmation_time_s") { + rclcpp::ParameterType + correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = + param.get_name() + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return false; + } + mCustomOdProperties[class_id].object_tracking_parameters.min_confirmation_time_s = + param.as_double(); + DEBUG_STREAM_DYN_PARAMS( + "Parameter '" + << param.get_name() << "' correctly set to " + << mCustomOdProperties[class_id].object_tracking_parameters.min_confirmation_time_s); + } else { + RCLCPP_WARN_STREAM(get_logger(), "Unknown parameter: " << param.get_name()); + } + + mObjDetRtParamsDirty = true; + return true; +} + +bool ZedCamera::startObjDetect() +{ + DEBUG_OD("startObjDetect"); + + if (!sl_tools::isObjDetAvailable(mCamRealModel)) { + RCLCPP_ERROR( + get_logger(), + "Object detection not started. 
The camera model does not " + "support it with the current version " + "of the " + "SDK"); + return false; + } + + if (mDepthDisabled) { + RCLCPP_WARN( + get_logger(), + "Cannot start Object Detection if " + "`depth.depth_mode` is set to `0` [NONE]"); + return false; + } + + if (!mObjDetEnabled) { + return false; + } + + RCLCPP_INFO(get_logger(), "=== Starting Object Detection ==="); + + sl::ObjectDetectionParameters od_p; + od_p.enable_segmentation = false; + od_p.enable_tracking = mObjDetTracking; + od_p.detection_model = mObjDetModel; + od_p.filtering_mode = mObjFilterMode; + od_p.prediction_timeout_s = mObjDetPredTimeout; + od_p.allow_reduced_precision_inference = mObjDetReducedPrecision; + od_p.max_range = mObjDetMaxRange; + + if (mUsingCustomOd) { + od_p.custom_onnx_dynamic_input_shape = mYoloOnnxSize; + od_p.custom_onnx_file = mYoloOnnxPath; + } + + sl::ERROR_CODE objDetError = mZed->enableObjectDetection(od_p); + if (objDetError != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Object detection error: " << sl::toString(objDetError)); + + mObjDetRunning = false; + return false; + } + + if (!mPubObjDet) { + mPubObjDet = create_publisher( + mObjectDetTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic " << mPubObjDet->get_topic_name()); + } + + mObjDetRunning = true; + return true; +} + +void ZedCamera::stopObjDetect() +{ + if (mObjDetRunning) { + RCLCPP_INFO(get_logger(), "=== Stopping Object Detection ==="); + mObjDetRunning = false; + mObjDetEnabled = false; + mZed->disableObjectDetection(); + + // ----> Send an empty message to indicate that no more objects are tracked + // (e.g clean RVIZ2) + auto objMsg = std::make_unique(); + + objMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : mFrameTimestamp; + objMsg->header.frame_id = mLeftCamFrameId; + + objMsg->objects.clear(); + + DEBUG_STREAM_OD( + "Publishing EMPTY OBJ message " + << mPubObjDet->get_topic_name()); + try { + mPubObjDet->publish(std::move(objMsg)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what() ); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + // <---- Send an empty message to indicate that no more objects are tracked + // (e.g clean RVIZ2) + } +} + +void ZedCamera::processDetectedObjects(rclcpp::Time t) +{ + size_t objdet_sub_count = 0; + + try { + if (mPubObjDet) { + objdet_sub_count = count_subscribers(mPubObjDet->get_topic_name()); + } + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_OD( + "processDetectedObjects: Exception while counting subscribers"); + return; + } + + if (objdet_sub_count < 1) { + mObjDetSubscribed = false; + return; + } + + sl_tools::StopWatch odElabTimer(get_clock()); + + mObjDetSubscribed = true; + + sl::Objects objects; + sl::ERROR_CODE objDetRes; + + // ----> Update runtime parameters only when changed (B2 optimization) + if (mObjDetRtParamsDirty) { + if (!mUsingCustomOd || ZED_SDK_MAJOR_VERSION < 5) { + sl::ObjectDetectionRuntimeParameters objectTracker_parameters_rt; + objectTracker_parameters_rt.detection_confidence_threshold = 50.0f; + mObjDetFilter.clear(); + mObjDetClassConfMap.clear(); + if (mObjDetPeopleEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::PERSON); + mObjDetClassConfMap[sl::OBJECT_CLASS::PERSON] = mObjDetPeopleConf; + } + if (mObjDetVehiclesEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::VEHICLE); + mObjDetClassConfMap[sl::OBJECT_CLASS::VEHICLE] = mObjDetVehiclesConf; + } + if (mObjDetBagsEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::BAG); + mObjDetClassConfMap[sl::OBJECT_CLASS::BAG] = mObjDetBagsConf; + } + if (mObjDetAnimalsEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::ANIMAL); + 
mObjDetClassConfMap[sl::OBJECT_CLASS::ANIMAL] = mObjDetAnimalsConf; + } + if (mObjDetElectronicsEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::ELECTRONICS); + mObjDetClassConfMap[sl::OBJECT_CLASS::ELECTRONICS] = mObjDetElectronicsConf; + } + if (mObjDetFruitsEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::FRUIT_VEGETABLE); + mObjDetClassConfMap[sl::OBJECT_CLASS::FRUIT_VEGETABLE] = + mObjDetFruitsConf; + } + if (mObjDetSportEnable) { + mObjDetFilter.push_back(sl::OBJECT_CLASS::SPORT); + mObjDetClassConfMap[sl::OBJECT_CLASS::SPORT] = mObjDetSportConf; + } + objectTracker_parameters_rt.object_class_filter = mObjDetFilter; + objectTracker_parameters_rt.object_class_detection_confidence_threshold = mObjDetClassConfMap; + mZed->setObjectDetectionRuntimeParameters(objectTracker_parameters_rt); + } +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 50 + else { + sl::CustomObjectDetectionRuntimeParameters custom_objectTracker_parameters_rt; + custom_objectTracker_parameters_rt.object_class_detection_properties = mCustomOdProperties; + mZed->setCustomObjectDetectionRuntimeParameters(custom_objectTracker_parameters_rt); + } +#endif + mObjDetRtParamsDirty = false; + } + // <---- Update runtime parameters only when changed + + objDetRes = mZed->retrieveObjects(objects); + + if (objDetRes != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Object Detection error: " << sl::toString(objDetRes)); + return; + } + + if (!objects.is_new) { // Async object detection. Update data only if new + // detection is available + DEBUG_OD("No new detected objects"); + return; + } + + DEBUG_STREAM_OD("Detected " << objects.object_list.size() << " objects"); + + size_t objCount = objects.object_list.size(); + + auto objMsg = std::make_unique(); + + objMsg->header.stamp = mUsePubTimestamps ? 
get_clock()->now() : t; + objMsg->header.frame_id = mLeftCamFrameId; + + objMsg->objects.resize(objCount); + + size_t idx = 0; + for (const auto & data : objects.object_list) { + if (!mUsingCustomOd) { + objMsg->objects[idx].label = sl::toString(data.label).c_str(); + objMsg->objects[idx].sublabel = sl::toString(data.sublabel).c_str(); + } else { + auto it = mCustomLabels.find(data.raw_label); + objMsg->objects[idx].label = (it != mCustomLabels.end()) ? it->second : std::string(); + objMsg->objects[idx].sublabel = std::to_string(data.raw_label); + } + + objMsg->objects[idx].label_id = data.id; + objMsg->objects[idx].confidence = data.confidence; + + DEBUG_STREAM_OD( + " * Object ID:" << data.id << " - " << + objMsg->objects[idx].label << " [" << objMsg->objects[idx].sublabel << "] - Confidence: " << + objMsg->objects[idx].confidence); + + memcpy( + &(objMsg->objects[idx].position[0]), &(data.position[0]), + 3 * sizeof(float)); + memcpy( + &(objMsg->objects[idx].position_covariance[0]), + &(data.position_covariance[0]), 6 * sizeof(float)); + memcpy( + &(objMsg->objects[idx].velocity[0]), &(data.velocity[0]), + 3 * sizeof(float)); + + objMsg->objects[idx].tracking_available = mObjDetTracking; + objMsg->objects[idx].tracking_state = + static_cast(data.tracking_state); + objMsg->objects[idx].action_state = + static_cast(data.action_state); + + if (data.bounding_box_2d.size() == 4) { + memcpy( + &(objMsg->objects[idx].bounding_box_2d.corners[0]), + &(data.bounding_box_2d[0]), 8 * sizeof(unsigned int)); + } + if (data.bounding_box.size() == 8) { + memcpy( + &(objMsg->objects[idx].bounding_box_3d.corners[0]), + &(data.bounding_box[0]), 24 * sizeof(float)); + } + + memcpy( + &(objMsg->objects[idx].dimensions_3d[0]), &(data.dimensions[0]), + 3 * sizeof(float)); + + if (data.head_bounding_box_2d.size() == 4) { + memcpy( + &(objMsg->objects[idx].head_bounding_box_2d.corners[0]), + &(data.head_bounding_box_2d[0]), 8 * sizeof(unsigned int)); + } + if 
(data.head_bounding_box.size() == 8) { + memcpy( + &(objMsg->objects[idx].head_bounding_box_3d.corners[0]), + &(data.head_bounding_box[0]), 24 * sizeof(float)); + } + + memcpy( + &(objMsg->objects[idx].head_position[0]), &(data.head_position[0]), + 3 * sizeof(float)); + + objMsg->objects[idx].skeleton_available = false; + // at the end of the loop + idx++; + } + + // DEBUG_STREAM_OD("Publishing OBJ DET message"); + try { + mPubObjDet->publish(std::move(objMsg)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + + // ----> Diagnostic information update + mObjDetElabMean_sec->addValue(odElabTimer.toc()); + mObjDetPeriodMean_sec->addValue(mOdFreqTimer.toc()); + mOdFreqTimer.tic(); + // <---- Diagnostic information update +} + +} // namespace stereolabs diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_video_depth.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_video_depth.cpp new file mode 100644 index 000000000..49e7401b2 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera/src/zed_camera_component_video_depth.cpp @@ -0,0 +1,3288 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "zed_camera_component.hpp" +#include "sl_logging.hpp" +#include "sl_tools.hpp" + +#include +#include +#include +#include + +#include + +namespace stereolabs +{ + +void ZedCamera::initVideoDepthPublishers() +{ + // ----> Topic name roots and suffixes + const std::string sens_rgb = "rgb/"; + const std::string sens_left = "left/"; + const std::string sens_right = "right/"; + const std::string sens_stereo = "stereo/"; + const std::string rectified = "rect/"; + const std::string raw = "raw/"; + const std::string color = "color/"; + const std::string gray = "gray/"; + const std::string type_image = "image"; + + // Helper to build topic names + auto make_topic = + [&](const std::string & sensor, const std::string & color_mode, const std::string & rect_raw, + const std::string & type) { + std::string topic = mTopicRoot + sensor + color_mode + rect_raw + type; + return get_node_topics_interface()->resolve_topic_name(topic); + }; + + // Image topics + mLeftTopic = make_topic(sens_left, color, rectified, type_image); + mLeftRawTopic = make_topic(sens_left, color, raw, type_image); + mRightTopic = make_topic(sens_right, color, rectified, type_image); + mRightRawTopic = make_topic(sens_right, color, raw, type_image); + mRgbTopic = make_topic(sens_rgb, color, rectified, type_image); + mRgbRawTopic = make_topic(sens_rgb, color, raw, type_image); + mStereoTopic = make_topic(sens_stereo, color, rectified, type_image); + mStereoRawTopic = make_topic(sens_stereo, color, raw, type_image); + mLeftGrayTopic = make_topic(sens_left, gray, rectified, type_image); + mLeftRawGrayTopic = make_topic(sens_left, gray, raw, type_image); + mRightGrayTopic = make_topic(sens_right, gray, rectified, type_image); + mRightRawGrayTopic = make_topic(sens_right, gray, raw, type_image); + mRgbGrayTopic = make_topic(sens_rgb, gray, rectified, type_image); + mRgbRawGrayTopic = make_topic(sens_rgb, gray, raw, type_image); + + // Depth topics + mDisparityTopic = mTopicRoot + 
"disparity/disparity_image"; + mDepthTopic = mTopicRoot + "depth/depth_registered"; + mDepthInfoTopic = mTopicRoot + "depth/depth_info"; + mConfMapTopic = mTopicRoot + "confidence/confidence_map"; + mPointcloudTopic = mTopicRoot + "point_cloud/cloud_registered"; + if (mOpenniDepthMode) { + RCLCPP_INFO(get_logger(), "OpenNI depth mode activated -> Units: mm, Encoding: MONO16"); + } + mDisparityTopic = get_node_topics_interface()->resolve_topic_name(mDisparityTopic); + mDepthTopic = get_node_topics_interface()->resolve_topic_name(mDepthTopic); + mDepthInfoTopic = get_node_topics_interface()->resolve_topic_name(mDepthInfoTopic); + mConfMapTopic = get_node_topics_interface()->resolve_topic_name(mConfMapTopic); + mPointcloudTopic = get_node_topics_interface()->resolve_topic_name(mPointcloudTopic); + + // ROI mask topic + mRoiMaskTopic = mTopicRoot + "roi_mask/image"; + mRoiMaskTopic = get_node_topics_interface()->resolve_topic_name(mRoiMaskTopic); + + // ----> Camera publishers + auto qos = mQos.get_rmw_qos_profile(); + + // Camera publishers + if (_nitrosDisabled) { + + // Publishers logging + auto log_cam_pub = [&](const auto & pub) { + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << pub.getTopic()); + std::vector transports{}; + //transports = image_transport::getLoadableTransports(); + try { + transports = image_transport::getDeclaredTransports(); + } catch (const std::exception & e) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Failed to get declared transports: " << e.what()); + } catch (...) 
{ + RCLCPP_ERROR(get_logger(), "Unknown error while getting declared transports"); + } + + for (const auto & transport : transports) { + std::string transport_copy = transport; + auto pos = transport_copy.find('/'); + if (pos != std::string::npos) { + transport_copy.erase(0, pos); + } + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << pub.getTopic() << transport_copy + << " [image_transport]"); + } + }; + + if (mPublishImgRgb) { + mPubRgb = image_transport::create_publisher(this, mRgbTopic, qos); + log_cam_pub(mPubRgb); + if (mPublishImgGray) { + mPubRgbGray = image_transport::create_publisher(this, mRgbGrayTopic, qos); + log_cam_pub(mPubRgbGray); + } + if (mPublishImgRaw) { + mPubRawRgb = image_transport::create_publisher(this, mRgbRawTopic, qos); + log_cam_pub(mPubRawRgb); + } + if (mPublishImgRaw && mPublishImgGray) { + mPubRawRgbGray = image_transport::create_publisher(this, mRgbRawGrayTopic, qos); + log_cam_pub(mPubRawRgbGray); + } + } + if (mPublishImgLeftRight) { + mPubLeft = image_transport::create_publisher(this, mLeftTopic, qos); + log_cam_pub(mPubLeft); + mPubRight = image_transport::create_publisher(this, mRightTopic, qos); + log_cam_pub(mPubRight); + if (mPublishImgGray) { + mPubLeftGray = image_transport::create_publisher(this, mLeftGrayTopic, qos); + log_cam_pub(mPubLeftGray); + + mPubRightGray = image_transport::create_publisher(this, mRightGrayTopic, qos); + log_cam_pub(mPubRightGray); + } + if (mPublishImgRaw) { + mPubRawLeft = image_transport::create_publisher(this, mLeftRawTopic, qos); + log_cam_pub(mPubRawLeft); + mPubRawRight = image_transport::create_publisher(this, mRightRawTopic, qos); + log_cam_pub(mPubRawRight); + } + + if (mPublishImgRaw && mPublishImgGray) { + mPubRawLeftGray = image_transport::create_publisher(this, mLeftRawGrayTopic, qos); + log_cam_pub(mPubRawLeftGray); + mPubRawRightGray = image_transport::create_publisher(this, mRightRawGrayTopic, qos); + log_cam_pub(mPubRawRightGray); + } + } + + if 
(!mDepthDisabled) { + if (mPublishImgRoiMask && (mAutoRoiEnabled || mManualRoiEnabled)) { + mPubRoiMask = image_transport::create_publisher(this, mRoiMaskTopic, qos); + log_cam_pub(mPubRoiMask); + } + if (mPublishDepthMap) { + mPubDepth = image_transport::create_publisher(this, mDepthTopic, qos); + log_cam_pub(mPubDepth); + } + if (mPublishConfidence) { + mPubConfMap = image_transport::create_publisher(this, mConfMapTopic, qos); + log_cam_pub(mPubConfMap); + } + } + + if (mPublishImgStereo) { + mPubStereo = image_transport::create_publisher(this, mStereoTopic, qos); + log_cam_pub(mPubStereo); + + if (mPublishImgRaw) { + mPubRawStereo = image_transport::create_publisher(this, mStereoRawTopic, qos); + log_cam_pub(mPubRawStereo); + } + } + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + // Nitros publishers lambda + auto make_nitros_img_pub = [&](const std::string & topic) { + auto ret = std::make_shared>( + this, topic, nvidia::isaac_ros::nitros::nitros_image_bgra8_t::supported_type_name, + nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig(), mQos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << topic); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << topic + "/nitros"); + return ret; + }; + + if (mPublishImgRgb) { + mNitrosPubRgb = make_nitros_img_pub(mRgbTopic); + if (mPublishImgGray) { + mNitrosPubRgbGray = make_nitros_img_pub(mRgbGrayTopic); + } + if (mPublishImgRaw) { + mNitrosPubRawRgb = make_nitros_img_pub(mRgbRawTopic); + } + if (mPublishImgGray && mPublishImgRaw) { + mNitrosPubRawRgbGray = make_nitros_img_pub(mRgbRawGrayTopic); + } + } + if (mPublishImgLeftRight) { + mNitrosPubLeft = make_nitros_img_pub(mLeftTopic); + mNitrosPubRight = make_nitros_img_pub(mRightTopic); + if (mPublishImgGray) { + mNitrosPubLeftGray = make_nitros_img_pub(mLeftGrayTopic); + mNitrosPubRightGray = make_nitros_img_pub(mRightGrayTopic); + } + if (mPublishImgRaw) { + mNitrosPubRawLeft = make_nitros_img_pub(mLeftRawTopic); + mNitrosPubRawRight = 
make_nitros_img_pub(mRightRawTopic); + } + if (mPublishImgGray && mPublishImgRaw) { + mNitrosPubRawLeftGray = make_nitros_img_pub(mLeftRawGrayTopic); + mNitrosPubRawRightGray = make_nitros_img_pub(mRightRawGrayTopic); + } + } + if (mPublishImgRoiMask && (mAutoRoiEnabled || mManualRoiEnabled)) { + mNitrosPubRoiMask = make_nitros_img_pub(mRoiMaskTopic); + } + if (mPublishDepthMap) { + mNitrosPubDepth = std::make_shared>( + this, mDepthTopic, nvidia::isaac_ros::nitros::nitros_image_32FC1_t::supported_type_name, + nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig(), mQos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << mDepthTopic); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << mDepthTopic + "/nitros"); + } + if (mPublishConfidence) { + mNitrosPubConfMap = make_nitros_img_pub(mConfMapTopic); + } +#endif + } + + // ----> Camera Info publishers + // Lambda to create and log CameraInfo publishers + auto make_cam_info_pub = [&](const std::string & topic) { + std::string info_topic = image_transport::getCameraInfoTopic(topic); + auto pub = create_publisher(info_topic, mQos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << pub->get_topic_name()); + return pub; + }; + + // Lambda to create and log CameraInfo publishers for image_transport or nitros + auto make_cam_info_trans_pub = [&](const std::string & topic) { + std::string info_topic = topic + "/camera_info"; + auto pub = create_publisher(info_topic, mQos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << pub->get_topic_name()); + return pub; + }; + + if (mPublishImgRgb) { + mPubRgbCamInfo = make_cam_info_pub(mRgbTopic); + mPubRgbCamInfoTrans = make_cam_info_trans_pub(mRgbTopic); + if (mPublishImgGray) { + mPubRgbGrayCamInfo = make_cam_info_pub(mRgbGrayTopic); + mPubRgbGrayCamInfoTrans = make_cam_info_trans_pub(mRgbGrayTopic); + } + if (mPublishImgRaw) { + mPubRawRgbCamInfo = make_cam_info_pub(mRgbRawTopic); + mPubRawRgbCamInfoTrans = 
make_cam_info_trans_pub(mRgbRawTopic); + } + if (mPublishImgGray && mPublishImgRaw) { + mPubRawRgbGrayCamInfo = make_cam_info_pub(mRgbRawGrayTopic); + mPubRawRgbGrayCamInfoTrans = make_cam_info_trans_pub(mRgbRawGrayTopic); + } + } + if (mPublishImgLeftRight) { + mPubLeftCamInfo = make_cam_info_pub(mLeftTopic); + mPubLeftCamInfoTrans = make_cam_info_trans_pub(mLeftTopic); + mPubRightCamInfo = make_cam_info_pub(mRightTopic); + mPubRightCamInfoTrans = make_cam_info_trans_pub(mRightTopic); + if (mPublishImgGray) { + mPubLeftGrayCamInfo = make_cam_info_pub(mLeftGrayTopic); + mPubLeftGrayCamInfoTrans = make_cam_info_trans_pub(mLeftGrayTopic); + mPubRightGrayCamInfo = make_cam_info_pub(mRightGrayTopic); + mPubRightGrayCamInfoTrans = make_cam_info_trans_pub(mRightGrayTopic); + } + if (mPublishImgRaw) { + mPubRawLeftCamInfo = make_cam_info_pub(mLeftRawTopic); + mPubRawLeftCamInfoTrans = make_cam_info_trans_pub(mLeftRawTopic); + mPubRawRightCamInfo = make_cam_info_pub(mRightRawTopic); + mPubRawRightCamInfoTrans = make_cam_info_trans_pub(mRightRawTopic); + } + if (mPublishImgGray && mPublishImgRaw) { + mPubRawLeftGrayCamInfo = make_cam_info_pub(mLeftRawGrayTopic); + mPubRawLeftGrayCamInfoTrans = make_cam_info_trans_pub(mLeftRawGrayTopic); + mPubRawRightGrayCamInfo = make_cam_info_pub(mRightRawGrayTopic); + mPubRawRightGrayCamInfoTrans = make_cam_info_trans_pub(mRightRawGrayTopic); + } + } + if (mPublishImgRoiMask && (mAutoRoiEnabled || mManualRoiEnabled)) { + mPubRoiMaskCamInfo = make_cam_info_pub(mRoiMaskTopic); + mPubRoiMaskCamInfoTrans = make_cam_info_trans_pub(mRoiMaskTopic); + } + if (mPublishDepthMap) { + mPubDepthCamInfo = make_cam_info_pub(mDepthTopic); + mPubDepthCamInfoTrans = make_cam_info_trans_pub(mDepthTopic); + } + if (mPublishConfidence) { + mPubConfMapCamInfo = make_cam_info_pub(mConfMapTopic); + mPubConfMapCamInfoTrans = make_cam_info_trans_pub(mConfMapTopic); + } + // <---- Camera Info publishers + + // ----> Other depth-related publishers + if 
(!mDepthDisabled) { + if (mPublishDepthInfo) { + mPubDepthInfo = create_publisher( + mDepthInfoTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubDepthInfo->get_topic_name()); + } + + // Point cloud and disparity publishers + if (mPublishDisparity) { + mPubDisparity = create_publisher( + mDisparityTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << mPubDisparity->get_topic_name()); + } + + if (mPublishPointcloud) { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + mPubCloud = point_cloud_transport::create_publisher( + shared_from_this(), mPointcloudTopic, qos, mPubOpt); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << mPubCloud.getTopic()); +#else + mPubCloud = create_publisher(mPointcloudTopic, mQos, mPubOpt); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << mPubCloud->get_topic_name()); +#endif + } + } + // <---- Other depth-related publishers +} + +void ZedCamera::getVideoParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== VIDEO parameters ==="); + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + if (!sl_tools::isZEDX(mCamUserModel)) { + sl_tools::getParam( + shared_from_this(), "video.brightness", mCamBrightness, + mCamBrightness, " * Brightness: ", true, 0, 8); + sl_tools::getParam( + shared_from_this(), "video.contrast", mCamContrast, + mCamContrast, " * Contrast: ", true, 0, 8); + sl_tools::getParam( + shared_from_this(), "video.hue", mCamHue, mCamHue, + " * Hue: ", true, 0, 11); + } + + sl_tools::getParam( + shared_from_this(), "video.saturation", mCamSaturation, + mCamSaturation, " * Saturation: ", true, 0, 8); + sl_tools::getParam( + shared_from_this(), "video.sharpness", mCamSharpness, + mCamSharpness, " * Sharpness: ", true, 0, 8); + sl_tools::getParam( + shared_from_this(), "video.gamma", mCamGamma, mCamGamma, + " * Gamma: ", true, 1, 9); + sl_tools::getParam( + 
shared_from_this(), "video.auto_exposure_gain", + mCamAutoExpGain, mCamAutoExpGain, + " * Auto Exposure/Gain: ", true); + if (mCamAutoExpGain) { + mTriggerAutoExpGain = true; + } + sl_tools::getParam( + shared_from_this(), "video.exposure", mCamExposure, + mCamExposure, " * Exposure: ", true, 0, 100); + sl_tools::getParam( + shared_from_this(), "video.gain", mCamGain, mCamGain, + " * Gain: ", true, 0, 100); + sl_tools::getParam( + shared_from_this(), "video.auto_whitebalance", mCamAutoWB, + mCamAutoWB, " * Auto White Balance: ", true); + if (mCamAutoWB) { + mTriggerAutoWB = true; + } + int wb = 42; + sl_tools::getParam( + shared_from_this(), "video.whitebalance_temperature", wb, + wb, " * White Balance Temperature: ", true, 28, 65); + mCamWBTemp = wb * 100; + + if (sl_tools::isZEDX(mCamUserModel)) { + sl_tools::getParam( + shared_from_this(), "video.exposure_time", mGmslExpTime, + mGmslExpTime, " * ZED X Exposure time: ", true, 28, + 66000); + sl_tools::getParam( + shared_from_this(), "video.auto_exposure_time_range_min", + mGmslAutoExpTimeRangeMin, mGmslAutoExpTimeRangeMin, + " * ZED X Auto Exp. time range min: ", true, 28, 66000); + sl_tools::getParam( + shared_from_this(), "video.auto_exposure_time_range_max", + mGmslAutoExpTimeRangeMax, mGmslAutoExpTimeRangeMax, + " * ZED X Auto Exp. 
time range max: ", true, 28, 66000); + sl_tools::getParam( + shared_from_this(), "video.exposure_compensation", + mGmslExposureComp, mGmslExposureComp, + " * ZED X Exposure comp.: ", true, 0, 100); + sl_tools::getParam( + shared_from_this(), "video.analog_gain", mGmslAnalogGain, + mGmslAnalogGain, " * ZED X Analog Gain: ", true, 1000, + 16000); + sl_tools::getParam( + shared_from_this(), "video.auto_analog_gain_range_min", + mGmslAnalogGainRangeMin, mGmslAnalogGainRangeMin, + " * ZED X Auto Analog Gain range min: ", true, 1000, + 16000); + sl_tools::getParam( + shared_from_this(), "video.auto_analog_gain_range_max", + mGmslAnalogGainRangeMax, mGmslAnalogGainRangeMax, + " * ZED X Auto Analog Gain range max: ", true, 1000, + 16000); + sl_tools::getParam( + shared_from_this(), "video.digital_gain", + mGmslDigitalGain, mGmslDigitalGain, + " * ZED X Digital Gain: ", true, 1, 256); + sl_tools::getParam( + shared_from_this(), "video.auto_digital_gain_range_min", + mGmslAutoDigitalGainRangeMin, + mGmslAutoDigitalGainRangeMin, + " * ZED X Auto Digital Gain range min: ", true, 1, 256); + sl_tools::getParam( + shared_from_this(), "video.auto_digital_gain_range_max", + mGmslAutoDigitalGainRangeMax, + mGmslAutoDigitalGainRangeMax, + " * ZED X Auto Digital Gain range max: ", true, 1, 256); + sl_tools::getParam( + shared_from_this(), "video.denoising", mGmslDenoising, + mGmslDenoising, + " * ZED X Auto Digital Gain range max: ", true, 0, 100); + } +} + +void ZedCamera::getDepthParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== DEPTH parameters ==="); + + std::string depth_mode_str = sl::toString(mDepthMode).c_str(); + sl_tools::getParam( + shared_from_this(), "depth.depth_mode", depth_mode_str, + depth_mode_str); + + // Check for model override syntax: e.g. 
'NEURAL_LIGHT:neural_depth_light_5.3.model' + // Supported separators: ':', ',', ';' + mDepthModelOverride.clear(); + for (char sep : {':', ',', ';'}) { + auto pos = depth_mode_str.find(sep); + if (pos != std::string::npos) { + mDepthModelOverride = depth_mode_str.substr(pos + 1); + depth_mode_str = depth_mode_str.substr(0, pos); + break; + } + } + + if (!sl_tools::matchSdkEnum( + depth_mode_str, sl::DEPTH_MODE::NONE, + sl::DEPTH_MODE::LAST, mDepthMode)) + { + mDepthMode = sl::DEPTH_MODE::NEURAL; + if (sl_tools::toUpper(depth_mode_str) != "NEURAL_LIGHT") { + RCLCPP_WARN( + get_logger(), + "The parameter 'depth.depth_mode' contains a not valid string. " + "Please check it in 'common_stereo.yaml'."); + RCLCPP_WARN_STREAM(get_logger(), "Using default value: " << sl::toString(mDepthMode).c_str()); + } + } + + if (!mDepthModelOverride.empty()) { + RCLCPP_INFO_STREAM( + get_logger(), + " * Depth model override: " << mDepthModelOverride); + } + + if (mDepthMode == sl::DEPTH_MODE::NONE) { + mDepthDisabled = true; + mDepthStabilization = 0; + RCLCPP_INFO_STREAM( + get_logger(), + " * Depth mode: " << sl::toString(mDepthMode).c_str() + << " - DEPTH DISABLED"); + } else { + mDepthDisabled = false; + RCLCPP_INFO_STREAM( + get_logger(), + " * Depth mode: " << sl::toString(mDepthMode).c_str() + << " [" << static_cast(mDepthMode) + << "]"); + } + + if (!mDepthDisabled) { +#if ((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51) + const double default_min_depth = 0.1; +#else + const double default_min_depth = 0.01; +#endif + sl_tools::getParam( + shared_from_this(), "depth.min_depth", mCamMinDepth, + mCamMinDepth, " * Min depth [m]: ", false, + default_min_depth, 3.0); + sl_tools::getParam( + shared_from_this(), "depth.max_depth", mCamMaxDepth, + mCamMaxDepth, " * Max depth [m]: ", false, 0.5, 1000.0); + + sl_tools::getParam( + shared_from_this(), "depth.depth_stabilization", + mDepthStabilization, mDepthStabilization, + " * Depth Stabilization: ", false, -1, 100); + // -1 
means use SDK default (mInitParams keeps its constructed default value) + + if (_nitrosDisabled) { + sl_tools::getParam( + shared_from_this(), "depth.openni_depth_mode", + mOpenniDepthMode, mOpenniDepthMode, + " * OpenNI mode (16bit depth): "); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + RCLCPP_INFO(get_logger(), " * OpenNI mode (16bit depth): DISABLED with NITROS"); +#endif + } + + sl_tools::getParam( + shared_from_this(), "depth.point_cloud_freq", mPcPubRate, + mPcPubRate, "", true, -1.0, static_cast(mCamGrabFrameRate)); + if (mPcPubRate <= 0.0) { + mPcPubRate = static_cast(mCamGrabFrameRate); + } + RCLCPP_INFO_STREAM( + get_logger(), + " * Point cloud rate [Hz]: " << mPcPubRate); + + std::string out_resol = "COMPACT"; + sl_tools::getParam( + shared_from_this(), "depth.point_cloud_res", out_resol, + out_resol); + if (out_resol == toString(PcRes::PUB)) { + mPcResolution = PcRes::PUB; + } else if (out_resol == toString(PcRes::FULL)) { + mPcResolution = PcRes::FULL; + } else if (out_resol == toString(PcRes::COMPACT)) { + mPcResolution = PcRes::COMPACT; + } else if (out_resol == toString(PcRes::REDUCED)) { + mPcResolution = PcRes::REDUCED; + } else { + RCLCPP_WARN( + get_logger(), + "Not valid 'depth.point_cloud_res' value: '%s'. 
Using default " + "setting instead.", + out_resol.c_str()); + out_resol = "COMPACT -> Fix the value in YAML!"; + mPcResolution = PcRes::COMPACT; + } + RCLCPP_INFO_STREAM( + get_logger(), + " * Point cloud resolution: " << out_resol.c_str()); + + sl_tools::getParam( + shared_from_this(), "depth.depth_confidence", mDepthConf, + mDepthConf, " * Depth Confidence: ", true, 0, 100); + sl_tools::getParam( + shared_from_this(), "depth.depth_texture_conf", + mDepthTextConf, mDepthTextConf, + " * Depth Texture Confidence: ", true, 0, 100); + sl_tools::getParam( + shared_from_this(), "depth.remove_saturated_areas", + mRemoveSatAreas, mRemoveSatAreas, + " * Remove saturated areas: ", true); + // ------------------------------------------ + } +} + +void ZedCamera::fillCamInfo( + const std::shared_ptr & zed, + const sensor_msgs::msg::CameraInfo::SharedPtr & leftCamInfoMsg, + const sensor_msgs::msg::CameraInfo::SharedPtr & rightCamInfoMsg, + const std::string & leftFrameId, const std::string & rightFrameId, + bool rawParam /*= false*/) +{ + sl::CalibrationParameters zedParam; + + if (rawParam) { + zedParam = zed->getCameraInformation(mMatResol) + .camera_configuration.calibration_parameters_raw; + } else { + zedParam = zed->getCameraInformation(mMatResol) + .camera_configuration.calibration_parameters; + } + + float baseline = zedParam.getCameraBaseline(); + + // ----> Distortion models + // ZED SDK params order: [ k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4] + // Radial (k1, k2, k3, k4, k5, k6), Tangential (p1,p2) and Prism (s1, s2, s3, + // s4) distortion. Prism not currently used. 
+ + // ROS2 order (OpenCV) -> k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4 + switch (mCamRealModel) { + case sl::MODEL::ZED: // PLUMB_BOB + leftCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::PLUMB_BOB; + rightCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::PLUMB_BOB; + leftCamInfoMsg->d.resize(5); + rightCamInfoMsg->d.resize(5); + leftCamInfoMsg->d[0] = zedParam.left_cam.disto[0]; // k1 + leftCamInfoMsg->d[1] = zedParam.left_cam.disto[1]; // k2 + leftCamInfoMsg->d[2] = zedParam.left_cam.disto[2]; // p1 + leftCamInfoMsg->d[3] = zedParam.left_cam.disto[3]; // p2 + leftCamInfoMsg->d[4] = zedParam.left_cam.disto[4]; // k3 + rightCamInfoMsg->d[0] = zedParam.right_cam.disto[0]; // k1 + rightCamInfoMsg->d[1] = zedParam.right_cam.disto[1]; // k2 + rightCamInfoMsg->d[2] = zedParam.right_cam.disto[2]; // p1 + rightCamInfoMsg->d[3] = zedParam.right_cam.disto[3]; // p2 + rightCamInfoMsg->d[4] = zedParam.right_cam.disto[4]; // k3 + break; + + case sl::MODEL::ZED2: // RATIONAL_POLYNOMIAL + case sl::MODEL::ZED2i: // RATIONAL_POLYNOMIAL + case sl::MODEL::ZED_X: // RATIONAL_POLYNOMIAL + case sl::MODEL::ZED_XM: // RATIONAL_POLYNOMIAL + case sl::MODEL::VIRTUAL_ZED_X: // RATIONAL_POLYNOMIAL + leftCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL; + rightCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL; + leftCamInfoMsg->d.resize(8); + rightCamInfoMsg->d.resize(8); + leftCamInfoMsg->d[0] = zedParam.left_cam.disto[0]; // k1 + leftCamInfoMsg->d[1] = zedParam.left_cam.disto[1]; // k2 + leftCamInfoMsg->d[2] = zedParam.left_cam.disto[2]; // p1 + leftCamInfoMsg->d[3] = zedParam.left_cam.disto[3]; // p2 + leftCamInfoMsg->d[4] = zedParam.left_cam.disto[4]; // k3 + leftCamInfoMsg->d[5] = zedParam.left_cam.disto[5]; // k4 + leftCamInfoMsg->d[6] = zedParam.left_cam.disto[6]; // k5 + leftCamInfoMsg->d[7] = zedParam.left_cam.disto[7]; // k6 + rightCamInfoMsg->d[0] = zedParam.right_cam.disto[0]; // k1 + 
rightCamInfoMsg->d[1] = zedParam.right_cam.disto[1]; // k2 + rightCamInfoMsg->d[2] = zedParam.right_cam.disto[2]; // p1 + rightCamInfoMsg->d[3] = zedParam.right_cam.disto[3]; // p2 + rightCamInfoMsg->d[4] = zedParam.right_cam.disto[4]; // k3 + rightCamInfoMsg->d[5] = zedParam.right_cam.disto[5]; // k4 + rightCamInfoMsg->d[6] = zedParam.right_cam.disto[6]; // k5 + rightCamInfoMsg->d[7] = zedParam.right_cam.disto[7]; // k6 + break; + + case sl::MODEL::ZED_M: + if (zedParam.left_cam.disto[5] != 0 && // k4!=0 + zedParam.right_cam.disto[2] == 0 && // p1==0 + zedParam.right_cam.disto[3] == 0) // p2==0 + { + leftCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::EQUIDISTANT; + rightCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::EQUIDISTANT; + + leftCamInfoMsg->d.resize(4); + rightCamInfoMsg->d.resize(4); + leftCamInfoMsg->d[0] = zedParam.left_cam.disto[0]; // k1 + leftCamInfoMsg->d[1] = zedParam.left_cam.disto[1]; // k2 + leftCamInfoMsg->d[2] = zedParam.left_cam.disto[4]; // k3 + leftCamInfoMsg->d[3] = zedParam.left_cam.disto[5]; // k4 + rightCamInfoMsg->d[0] = zedParam.right_cam.disto[0]; // k1 + rightCamInfoMsg->d[1] = zedParam.right_cam.disto[1]; // k2 + rightCamInfoMsg->d[2] = zedParam.right_cam.disto[4]; // k3 + rightCamInfoMsg->d[3] = zedParam.right_cam.disto[5]; // k4 + } else { + leftCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::PLUMB_BOB; + rightCamInfoMsg->distortion_model = + sensor_msgs::distortion_models::PLUMB_BOB; + leftCamInfoMsg->d.resize(5); + rightCamInfoMsg->d.resize(5); + leftCamInfoMsg->d[0] = zedParam.left_cam.disto[0]; // k1 + leftCamInfoMsg->d[1] = zedParam.left_cam.disto[1]; // k2 + leftCamInfoMsg->d[2] = zedParam.left_cam.disto[2]; // p1 + leftCamInfoMsg->d[3] = zedParam.left_cam.disto[3]; // p2 + leftCamInfoMsg->d[4] = zedParam.left_cam.disto[4]; // k3 + rightCamInfoMsg->d[0] = zedParam.right_cam.disto[0]; // k1 + rightCamInfoMsg->d[1] = zedParam.right_cam.disto[1]; // k2 + 
rightCamInfoMsg->d[2] = zedParam.right_cam.disto[2]; // p1 + rightCamInfoMsg->d[3] = zedParam.right_cam.disto[3]; // p2 + rightCamInfoMsg->d[4] = zedParam.right_cam.disto[4]; // k3 + } + } + + leftCamInfoMsg->k.fill(0.0); + rightCamInfoMsg->k.fill(0.0); + leftCamInfoMsg->k[0] = static_cast(zedParam.left_cam.fx); + leftCamInfoMsg->k[2] = static_cast(zedParam.left_cam.cx); + leftCamInfoMsg->k[4] = static_cast(zedParam.left_cam.fy); + leftCamInfoMsg->k[5] = static_cast(zedParam.left_cam.cy); + leftCamInfoMsg->k[8] = 1.0; + rightCamInfoMsg->k[0] = static_cast(zedParam.right_cam.fx); + rightCamInfoMsg->k[2] = static_cast(zedParam.right_cam.cx); + rightCamInfoMsg->k[4] = static_cast(zedParam.right_cam.fy); + rightCamInfoMsg->k[5] = static_cast(zedParam.right_cam.cy); + rightCamInfoMsg->k[8] = 1.0; + leftCamInfoMsg->r.fill(0.0); + rightCamInfoMsg->r.fill(0.0); + + for (size_t i = 0; i < 3; i++) { + // identity + rightCamInfoMsg->r[i + i * 3] = 1; + leftCamInfoMsg->r[i + i * 3] = 1; + } + + if (rawParam) { + // ROS frame (X forward, Z up, Y left) + for (int i = 0; i < 9; i++) { + rightCamInfoMsg->r[i] = + zedParam.stereo_transform.getRotationMatrix().r[i]; + } + } + + leftCamInfoMsg->p.fill(0.0); + rightCamInfoMsg->p.fill(0.0); + leftCamInfoMsg->p[0] = static_cast(zedParam.left_cam.fx); + leftCamInfoMsg->p[2] = static_cast(zedParam.left_cam.cx); + leftCamInfoMsg->p[5] = static_cast(zedParam.left_cam.fy); + leftCamInfoMsg->p[6] = static_cast(zedParam.left_cam.cy); + leftCamInfoMsg->p[10] = 1.0; + // http://docs.ros.org/api/sensor_msgs/html/msg/CameraInfo.html + rightCamInfoMsg->p[3] = + static_cast(-1 * zedParam.left_cam.fx * baseline); + rightCamInfoMsg->p[0] = static_cast(zedParam.right_cam.fx); + rightCamInfoMsg->p[2] = static_cast(zedParam.right_cam.cx); + rightCamInfoMsg->p[5] = static_cast(zedParam.right_cam.fy); + rightCamInfoMsg->p[6] = static_cast(zedParam.right_cam.cy); + rightCamInfoMsg->p[10] = 1.0; + leftCamInfoMsg->width = rightCamInfoMsg->width = + 
static_cast(mMatResol.width); + leftCamInfoMsg->height = rightCamInfoMsg->height = + static_cast(mMatResol.height); + leftCamInfoMsg->header.frame_id = leftFrameId; + rightCamInfoMsg->header.frame_id = rightFrameId; +} + +bool ZedCamera::areVideoDepthSubscribed() +{ + if (!updateVideoDepthSubscribers()) { + return false; + } + + return ( + mRgbSubCount + mRgbRawSubCount + mRgbGraySubCount + mRgbGrayRawSubCount + + mLeftSubCount + mLeftRawSubCount + mLeftGraySubCount + mLeftGrayRawSubCount + + mRightSubCount + mRightRawSubCount + mRightGraySubCount + mRightGrayRawSubCount + + mStereoSubCount + mStereoRawSubCount + + mDepthSubCount + mConfMapSubCount + mDisparitySubCount + mDepthInfoSubCount + ) > 0; +} + +bool ZedCamera::updateVideoDepthSubscribers(bool force) +{ + constexpr auto kSubQueryInterval = std::chrono::milliseconds(200); + auto now = std::chrono::steady_clock::now(); + + if (!force && mVideoDepthSubCountInit && + (now - mLastVideoDepthSubCountQuery) < kSubQueryInterval) + { + return true; + } + + mLastVideoDepthSubCountQuery = now; + mVideoDepthSubCountInit = true; + + mRgbSubCount = 0; + mRgbRawSubCount = 0; + mRgbGraySubCount = 0; + mRgbGrayRawSubCount = 0; + mLeftSubCount = 0; + mLeftRawSubCount = 0; + mLeftGraySubCount = 0; + mLeftGrayRawSubCount = 0; + mRightSubCount = 0; + mRightRawSubCount = 0; + mRightGraySubCount = 0; + mRightGrayRawSubCount = 0; + mStereoSubCount = 0; + mStereoRawSubCount = 0; + mDepthSubCount = 0; + mConfMapSubCount = 0; + mDisparitySubCount = 0; + mDepthInfoSubCount = 0; + mPcSubCount = 0; + + try { + if (_nitrosDisabled) { + if (mPublishImgRgb) { + mRgbSubCount = mPubRgb.getNumSubscribers(); + if (mPublishImgRaw) { + mRgbRawSubCount = mPubRawRgb.getNumSubscribers(); + } + if (mPublishImgGray) { + mRgbGraySubCount = mPubRgbGray.getNumSubscribers(); + if (mPublishImgRaw) { + mRgbGrayRawSubCount = mPubRawRgbGray.getNumSubscribers(); + } + } + } + if (mPublishImgLeftRight) { + mLeftSubCount = mPubLeft.getNumSubscribers(); + 
mRightSubCount = mPubRight.getNumSubscribers(); + if (mPublishImgRaw) { + mLeftRawSubCount = mPubRawLeft.getNumSubscribers(); + mRightRawSubCount = mPubRawRight.getNumSubscribers(); + } + if (mPublishImgGray) { + mLeftGraySubCount = mPubLeftGray.getNumSubscribers(); + mRightGraySubCount = mPubRightGray.getNumSubscribers(); + if (mPublishImgRaw) { + mLeftGrayRawSubCount = mPubRawLeftGray.getNumSubscribers(); + mRightGrayRawSubCount = mPubRawRightGray.getNumSubscribers(); + } + } + } + if (mPublishImgStereo) { + mStereoSubCount = mPubStereo.getNumSubscribers(); + mStereoRawSubCount = mPubRawStereo.getNumSubscribers(); + } + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + mRgbSubCount = count_subscribers(mRgbTopic) + count_subscribers(mRgbTopic + "/nitros"); + mRgbRawSubCount = count_subscribers(mRgbRawTopic) + count_subscribers( + mRgbRawTopic + "/nitros"); + mRgbGraySubCount = count_subscribers(mRgbGrayTopic) + count_subscribers( + mRgbGrayTopic + "/nitros"); + mRgbGrayRawSubCount = count_subscribers(mRgbRawGrayTopic) + count_subscribers( + mRgbRawGrayTopic + "/nitros"); + mLeftSubCount = count_subscribers(mLeftTopic) + count_subscribers(mLeftTopic + "/nitros"); + mLeftRawSubCount = count_subscribers(mLeftRawTopic) + count_subscribers( + mLeftRawTopic + "/nitros"); + mLeftGraySubCount = count_subscribers(mLeftGrayTopic) + count_subscribers( + mLeftGrayTopic + "/nitros"); + mLeftGrayRawSubCount = count_subscribers(mLeftRawGrayTopic) + count_subscribers( + mLeftRawGrayTopic + "/nitros"); + mRightSubCount = count_subscribers(mRightTopic) + count_subscribers( + mRightTopic + "/nitros"); + mRightRawSubCount = count_subscribers(mRightRawTopic) + count_subscribers( + mRightRawTopic + "/nitros"); + mRightGraySubCount = count_subscribers(mRightGrayTopic) + count_subscribers( + mRightGrayTopic + "/nitros"); + mRightGrayRawSubCount = count_subscribers(mRightRawGrayTopic) + count_subscribers( + mRightRawGrayTopic + "/nitros"); + mStereoSubCount = count_subscribers(mStereoTopic) + 
count_subscribers( + mStereoTopic + "/nitros"); + mStereoRawSubCount = count_subscribers(mStereoRawTopic) + count_subscribers( + mStereoRawTopic + "/nitros"); +#endif + } + + + if (!mDepthDisabled) { + if (_nitrosDisabled) { + if (mPublishDepthMap) { + mDepthSubCount = mPubDepth.getNumSubscribers(); + } + if (mPublishConfidence) { + mConfMapSubCount = mPubConfMap.getNumSubscribers(); + } + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + mDepthSubCount = count_subscribers(mDepthTopic) + count_subscribers( + mDepthTopic + "/nitros"); + mConfMapSubCount = count_subscribers(mConfMapTopic) + count_subscribers( + mConfMapTopic + "/nitros"); +#endif + } + if (mPubDepthInfo) { + mDepthInfoSubCount = count_subscribers(mPubDepthInfo->get_topic_name()); + } + if (mPubDisparity) { + mDisparitySubCount = count_subscribers(mPubDisparity->get_topic_name()); + } + +#ifdef FOUND_POINT_CLOUD_TRANSPORT + mPcSubCount = mPubCloud.getNumSubscribers(); +#else + if (mPubCloud) { + mPcSubCount = count_subscribers(mPubCloud->get_topic_name()); + } +#endif + } + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_VD(" * [updateVideoDepthSubscribers] Exception while counting subscribers"); + return false; + } + + return true; +} + +bool ZedCamera::isDepthRequired() +{ + // DEBUG_STREAM_COMM( "isDepthRequired called"); + + if (mDepthDisabled) { + DEBUG_STREAM_COMM("Depth not required: depth disabled"); + return false; + } + + if (!updateVideoDepthSubscribers()) { + DEBUG_STREAM_VD(" * [isDepthRequired] failed to refresh subscribers, using cached values"); + } + + size_t tot_sub = + mDepthSubCount + mConfMapSubCount + mDisparitySubCount + mPcSubCount + + mDepthInfoSubCount; + + bool pos_tracking_required = isPosTrackingRequired(); + bool depth_required_for_pos_trk = pos_tracking_required; + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 52 + // With ZED SDK v5.2 we can use Positional Tracking `GEN_3` even if depth is + // disabled + depth_required_for_pos_trk = pos_tracking_required && + (mPosTrkMode != sl::POSITIONAL_TRACKING_MODE::GEN_3); +#endif + + return tot_sub > 0 || depth_required_for_pos_trk; +} + +void ZedCamera::applyDepthSettings() +{ + if (isDepthRequired()) { + std::lock_guard lock(mDynParMutex); + mRunParams.confidence_threshold = + mDepthConf; // Update depth confidence if changed + mRunParams.texture_confidence_threshold = + mDepthTextConf; // Update depth texture confidence if changed + mRunParams.remove_saturated_areas = mRemoveSatAreas; + + DEBUG_STREAM_COMM("Depth processing enabled"); + mRunParams.enable_depth = true; + + } else { + DEBUG_STREAM_COMM("Depth processing disabled"); + mRunParams.enable_depth = false; + } +} + +void ZedCamera::applyVideoSettings() +{ + if (!mSvoMode && mCamSettingsDirty && mFrameCount % 10 == 0) { + std::lock_guard lock(mDynParMutex); + + applyAutoExposureGainSettings(); + applyExposureGainSettings(); + applyWhiteBalanceSettings(); + applyBrightnessContrastHueSettings(); + applySaturationSharpnessGammaSettings(); + applyZEDXSettings(); + + mCamSettingsDirty = false; + } 
+} + +// Helper: Auto Exposure/Gain +void ZedCamera::applyAutoExposureGainSettings() +{ + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + + if (mTriggerAutoExpGain) { + setting = sl::VIDEO_SETTINGS::AEC_AGC; + err = mZed->setCameraSettings(setting, (mCamAutoExpGain ? 1 : 0)); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting AEC_AGC: " + << sl::toString(err).c_str()); + } else { + mTriggerAutoExpGain = false; + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " + << (mCamAutoExpGain ? 1 : 0)); + } + } +} + +// Helper: Exposure and Gain +void ZedCamera::applyExposureGainSettings() +{ + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + + if (!mCamAutoExpGain) { + int value; + err = mZed->getCameraSettings(sl::VIDEO_SETTINGS::EXPOSURE, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamExposure) { + mZed->setCameraSettings(sl::VIDEO_SETTINGS::EXPOSURE, mCamExposure); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " + << sl::toString(sl::VIDEO_SETTINGS::EXPOSURE).c_str() << ": " + << sl::toString(err).c_str()); + } + + err = mZed->getCameraSettings(sl::VIDEO_SETTINGS::GAIN, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamGain) { + err = mZed->setCameraSettings(sl::VIDEO_SETTINGS::GAIN, mCamGain); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " + << sl::toString(sl::VIDEO_SETTINGS::GAIN).c_str() + << ": " << sl::toString(err).c_str()); + } + } +} + +// Helper: White Balance +void ZedCamera::applyWhiteBalanceSettings() +{ + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + + if (mTriggerAutoWB) { + setting = sl::VIDEO_SETTINGS::WHITEBALANCE_AUTO; + err = mZed->setCameraSettings(setting, (mCamAutoWB ? 
1 : 0)); + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } else { + mTriggerAutoWB = false; + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " << (mCamAutoWB ? 1 : 0)); + } + } + + if (!mCamAutoWB) { + int value; + setting = sl::VIDEO_SETTINGS::WHITEBALANCE_TEMPERATURE; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamWBTemp) { + err = mZed->setCameraSettings(setting, mCamWBTemp); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +// Helper: Brightness, Contrast, Hue (not for ZED X) +void ZedCamera::applyBrightnessContrastHueSettings() +{ + if (!sl_tools::isZEDX(mCamRealModel)) { + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + int value; + + setting = sl::VIDEO_SETTINGS::BRIGHTNESS; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamBrightness) { + mZed->setCameraSettings(setting, mCamBrightness); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + + setting = sl::VIDEO_SETTINGS::CONTRAST; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamContrast) { + err = mZed->setCameraSettings(setting, mCamContrast); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + + setting = sl::VIDEO_SETTINGS::HUE; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamHue) { + mZed->setCameraSettings(setting, mCamHue); + } 
+ + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +// Helper: Saturation, Sharpness, Gamma +void ZedCamera::applySaturationSharpnessGammaSettings() +{ + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting; + int value; + + setting = sl::VIDEO_SETTINGS::SATURATION; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamSaturation) { + mZed->setCameraSettings(setting, mCamSaturation); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " << sl::toString(setting).c_str() + << ": " << sl::toString(err).c_str()); + } + + setting = sl::VIDEO_SETTINGS::SHARPNESS; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamSharpness) { + mZed->setCameraSettings(setting, mCamSharpness); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " << sl::toString(setting).c_str() + << ": " << sl::toString(err).c_str()); + } + + setting = sl::VIDEO_SETTINGS::GAMMA; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mCamGamma) { + err = mZed->setCameraSettings(setting, mCamGamma); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " << sl::toString(setting).c_str() + << ": " << sl::toString(err).c_str()); + } +} + +// Helper: ZED X specific settings +void ZedCamera::applyZEDXSettings() +{ + if (!sl_tools::isZEDX(mCamRealModel)) { + return; + } + + applyZEDXExposureSettings(); + applyZEDXAutoExposureTimeRange(); + applyZEDXExposureCompensation(); + applyZEDXAnalogDigitalGain(); + applyZEDXAutoAnalogGainRange(); + applyZEDXAutoDigitalGainRange(); + applyZEDXDenoising(); +} + +void ZedCamera::applyZEDXExposureSettings() +{ + if (!mCamAutoExpGain) { + sl::ERROR_CODE err; + 
sl::VIDEO_SETTINGS setting = sl::VIDEO_SETTINGS::EXPOSURE_TIME; + int value; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mGmslExpTime) { + err = mZed->setCameraSettings(setting, mGmslExpTime); + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " << mGmslExpTime << " [Old " + << value << "]"); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +void ZedCamera::applyZEDXAutoExposureTimeRange() +{ + // ----> TODO(Walter) Remove this check when fixed in ZED SDK + if (mCamRealModel == sl::MODEL::VIRTUAL_ZED_X) { + DEBUG_VD( + "Auto Digital Gain Range not supported for VIRTUAL_ZED_X model. " + "Skipping setting."); + return; + } + // <---- TODO(Walter) Remove this check when fixed in ZED SDK + + sl::ERROR_CODE err; + int value_min, value_max; + err = mZed->getCameraSettings( + sl::VIDEO_SETTINGS::AUTO_EXPOSURE_TIME_RANGE, + value_min, value_max); + if (err == sl::ERROR_CODE::SUCCESS && + (value_min != mGmslAutoExpTimeRangeMin || + value_max != mGmslAutoExpTimeRangeMax)) + { + err = mZed->setCameraSettings( + sl::VIDEO_SETTINGS::AUTO_EXPOSURE_TIME_RANGE, + mGmslAutoExpTimeRangeMin, mGmslAutoExpTimeRangeMax); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString( + sl::VIDEO_SETTINGS::AUTO_EXPOSURE_TIME_RANGE) + .c_str() + << ": " << sl::toString(err).c_str()); + } +} + +void ZedCamera::applyZEDXExposureCompensation() +{ + if (!mStreamMode) { + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting = sl::VIDEO_SETTINGS::EXPOSURE_COMPENSATION; + int value; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mGmslExposureComp) { + err = mZed->setCameraSettings(setting, mGmslExposureComp); + DEBUG_STREAM_CTRL( + "New setting for " << 
sl::toString(setting).c_str() + << ": " << mGmslExposureComp + << " [Old " << value << "]"); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +void ZedCamera::applyZEDXAnalogDigitalGain() +{ + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting = sl::VIDEO_SETTINGS::ANALOG_GAIN; + + if (!mCamAutoExpGain) { + int value; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mGmslAnalogGain) { + err = mZed->setCameraSettings(setting, mGmslAnalogGain); + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " << mGmslAnalogGain + << " [Old " << value << "]"); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + + setting = sl::VIDEO_SETTINGS::DIGITAL_GAIN; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mGmslDigitalGain) { + err = mZed->setCameraSettings(setting, mGmslDigitalGain); + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " << mGmslDigitalGain + << " [Old " << value << "]"); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +void ZedCamera::applyZEDXAutoAnalogGainRange() +{ + // ----> TODO(Walter) Remove this check when fixed in ZED SDK + if (mCamRealModel == sl::MODEL::VIRTUAL_ZED_X) { + DEBUG_VD( + "Auto Analog Gain Range not supported for VIRTUAL_ZED_X model. 
" + "Skipping setting."); + return; + } + // <---- TODO(Walter) Remove this check when fixed in ZED SDK + + sl::ERROR_CODE err; + int value_min, value_max; + err = mZed->getCameraSettings( + sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE, + value_min, value_max); + if (err == sl::ERROR_CODE::SUCCESS && + (value_min != mGmslAnalogGainRangeMin || + value_max != mGmslAnalogGainRangeMax)) + { + err = mZed->setCameraSettings( + sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE, + mGmslAnalogGainRangeMin, + mGmslAnalogGainRangeMax); + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " + << sl::toString(sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE) + .c_str() + << ": " << sl::toString(err).c_str()); + } + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " + << sl::toString( + sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE) + .c_str() + << ": " << sl::toString(err).c_str()); + } +} + +void ZedCamera::applyZEDXAutoDigitalGainRange() +{ + // ----> TODO(Walter) Remove this check when fixed in ZED SDK + if (mCamRealModel == sl::MODEL::VIRTUAL_ZED_X) { + DEBUG_VD( + "Auto Digital Gain Range not supported for VIRTUAL_ZED_X model. 
" + "Skipping setting."); + return; + } + // <---- TODO(Walter) Remove this check when fixed in ZED SDK + + sl::ERROR_CODE err; + int value_min, value_max; + err = mZed->getCameraSettings( + sl::VIDEO_SETTINGS::AUTO_DIGITAL_GAIN_RANGE, + value_min, value_max); + if (err == sl::ERROR_CODE::SUCCESS && + (value_min != mGmslAutoDigitalGainRangeMin || + value_max != mGmslAutoDigitalGainRangeMax)) + { + err = mZed->setCameraSettings( + sl::VIDEO_SETTINGS::AUTO_DIGITAL_GAIN_RANGE, + mGmslAutoDigitalGainRangeMin, + mGmslAutoDigitalGainRangeMax); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Error setting " + << sl::toString( + sl::VIDEO_SETTINGS::AUTO_DIGITAL_GAIN_RANGE) + .c_str() + << ": " << sl::toString(err).c_str()); + } +} + +void ZedCamera::applyZEDXDenoising() +{ + if (!mStreamMode) { + sl::ERROR_CODE err; + sl::VIDEO_SETTINGS setting = sl::VIDEO_SETTINGS::DENOISING; + int value; + err = mZed->getCameraSettings(setting, value); + if (err == sl::ERROR_CODE::SUCCESS && value != mGmslDenoising) { + err = mZed->setCameraSettings(setting, mGmslDenoising); + DEBUG_STREAM_CTRL( + "New setting for " << sl::toString(setting).c_str() + << ": " << mGmslDenoising + << " [Old " << value << "]"); + } + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << sl::toString(setting).c_str() + << ": " + << sl::toString(err).c_str()); + } + } +} + +void ZedCamera::processVideoDepth() +{ + DEBUG_VD("=== Process Video/Depth ==="); + + // If no subscribers, do not retrieve data + if (areVideoDepthSubscribed()) { + DEBUG_VD(" * [processVideoDepth] vd_lock -> defer"); + std::unique_lock vd_lock(mVdMutex, std::defer_lock); + + DEBUG_VD(" * [processVideoDepth] vd_lock -> try_lock"); + if (vd_lock.try_lock()) { + bool gpu = false; +#ifdef FOUND_ISAAC_ROS_NITROS + if (!_nitrosDisabled) { + gpu = true; + } +#endif + retrieveVideoDepth(gpu); + + // Signal Video/Depth thread that a new pointcloud is ready + 
mVdDataReadyCondVar.notify_one(); + mVdDataReady = true; + mVdPublishing = true; + } else { + DEBUG_VD(" * [processVideoDepth] vd_lock not locked"); + } + } else { + mVdPublishing = false; + DEBUG_VD(" * [processVideoDepth] No video/depth subscribers"); + + // Publish camera infos even if no video/depth subscribers are present + publishCameraInfos(); + } + DEBUG_VD("=== Process Video/Depth done ==="); +} + +void ZedCamera::retrieveVideoDepth(bool gpu) +{ + DEBUG_VD(" *** Retrieving Video/Depth Data ***"); + mRgbSubscribed = false; + bool retrieved_video = false; + bool retrieved_depth = false; + + DEBUG_STREAM_VD(" *** Retrieving Video Data ***"); + retrieved_video |= retrieveLeftImage(gpu); + retrieved_video |= retrieveLeftRawImage(gpu); + retrieved_video |= retrieveRightImage(gpu); + retrieved_video |= retrieveRightRawImage(gpu); + retrieved_video |= retrieveLeftGrayImage(gpu); + retrieved_video |= retrieveLeftRawGrayImage(gpu); + retrieved_video |= retrieveRightGrayImage(gpu); + retrieved_video |= retrieveRightRawGrayImage(gpu); + + if (retrieved_video) { + DEBUG_STREAM_VD(" *** Video Data retrieved ***"); + } + + DEBUG_STREAM_VD(" *** Retrieving Depth Data ***"); + retrieved_depth |= retrieveDepthMap(gpu); + retrieved_depth |= retrieveConfidence(gpu); + retrieved_depth |= retrieveDisparity(); + retrieved_depth |= retrieveDepthInfo(); + + if (retrieved_depth) { + DEBUG_STREAM_VD(" *** Depth Data retrieved ***"); + } + + if (retrieved_video || retrieved_depth) { + mSdkGrabTS = mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE); + auto now = mZed->getTimestamp(sl::TIME_REFERENCE::CURRENT); + DEBUG_STREAM_VD( + " * Video/Depth Latency: " << static_cast(now - mSdkGrabTS) * 1e-9 << " sec"); + } + + DEBUG_VD(" *** Retrieving Video/Depth Data DONE ***"); +} + +// Helper functions for retrieveVideoDepth() + +bool ZedCamera::retrieveLeftImage(bool gpu) +{ + if (mRgbSubCount + mLeftSubCount + mStereoSubCount > 0) { + DEBUG_VD(" * Retrieving Left image"); + bool ok = 
sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatLeft, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + m24bitMode ? sl::VIEW::LEFT_BGR : sl::VIEW::LEFT_BGRA, +#else + sl::VIEW::LEFT, +#endif + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + mRgbSubscribed = true; + DEBUG_STREAM_VD(" * Left image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveLeftRawImage(bool gpu) +{ + if (mRgbRawSubCount + mLeftRawSubCount + mStereoRawSubCount > 0) { + DEBUG_VD(" * Retrieving Left raw image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatLeftRaw, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + m24bitMode ? sl::VIEW::LEFT_UNRECTIFIED_BGR : sl::VIEW::LEFT_UNRECTIFIED_BGRA, +#else + sl::VIEW::LEFT_UNRECTIFIED, +#endif + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Left raw image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveRightImage(bool gpu) +{ + if (mRightSubCount + mStereoSubCount > 0) { + DEBUG_VD(" * Retrieving Right image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatRight, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + m24bitMode ? sl::VIEW::RIGHT_BGR : sl::VIEW::RIGHT_BGRA, +#else + sl::VIEW::RIGHT, +#endif + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Right image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveRightRawImage(bool gpu) +{ + if (mRightRawSubCount + mStereoRawSubCount > 0) { + DEBUG_VD(" * Retrieving Right raw image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatRightRaw, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + m24bitMode ? 
sl::VIEW::RIGHT_UNRECTIFIED_BGR : sl::VIEW::RIGHT_UNRECTIFIED_BGRA, +#else + sl::VIEW::RIGHT_UNRECTIFIED, +#endif + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Right raw image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveLeftGrayImage(bool gpu) +{ + if (mRgbGraySubCount + mLeftGraySubCount > 0) { + DEBUG_VD(" * Retrieving Left gray image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatLeftGray, sl::VIEW::LEFT_GRAY, + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Left gray image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveLeftRawGrayImage(bool gpu) +{ + if (mRgbGrayRawSubCount + mLeftGrayRawSubCount > 0) { + DEBUG_VD(" * Retrieving Left gray raw image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatLeftRawGray, sl::VIEW::LEFT_UNRECTIFIED_GRAY, + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD( + " * Left gray raw image retrieved into " << (gpu ? "GPU" : "CPU") << + " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveRightGrayImage(bool gpu) +{ + if (mRightGraySubCount > 0) { + DEBUG_VD(" * Retrieving Right gray image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatRightGray, sl::VIEW::RIGHT_GRAY, + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Right gray image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveRightRawGrayImage(bool gpu) +{ + if (mRightGrayRawSubCount > 0) { + DEBUG_VD(" * Retrieving Right gray raw image"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveImage( + mMatRightRawGray, sl::VIEW::RIGHT_UNRECTIFIED_GRAY, + gpu ? 
sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD( + " * Right gray raw image retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveDepthMap(bool gpu) +{ + if (mDepthSubCount > 0 || mDepthInfoSubCount > 0) { + DEBUG_STREAM_VD(" * Retrieving Depth Map"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveMeasure( + mMatDepth, sl::MEASURE::DEPTH, + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Depth map retrieved into " << (gpu ? "GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveDisparity() +{ + if (mDisparitySubCount > 0) { + DEBUG_STREAM_VD(" * Retrieving Disparity"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveMeasure( + mMatDisp, sl::MEASURE::DISPARITY, + sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_VD(" * Disparity map retrieved"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveConfidence(bool gpu) +{ + if (mConfMapSubCount > 0) { + DEBUG_STREAM_VD(" * Retrieving Confidence"); + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->retrieveMeasure( + mMatConf, sl::MEASURE::CONFIDENCE, + gpu ? sl::MEM::GPU : sl::MEM::CPU, mMatResol); + if (ok) { + DEBUG_STREAM_VD(" * Confidence map retrieved into " << (gpu ? 
"GPU" : "CPU") << " memory"); + } + return ok; + } + return false; +} + +bool ZedCamera::retrieveDepthInfo() +{ + if (mDepthInfoSubCount > 0) { + bool ok = sl::ERROR_CODE::SUCCESS == + mZed->getCurrentMinMaxDepth(mMinDepth, mMaxDepth); + if (ok) { + DEBUG_VD(" * Depth info retrieved"); + } + return ok; + } + return false; +} + +void ZedCamera::publishVideoDepth(rclcpp::Time & out_pub_ts) +{ + DEBUG_VD("=== Publish Video and Depth topics === "); + sl_tools::StopWatch vdElabTimer(get_clock()); + + checkRgbDepthSync(); + + vdElabTimer.tic(); + rclcpp::Time timeStamp; + + if (!checkGrabAndUpdateTimestamp(timeStamp)) { + return; + } + + publishLeftAndRgbImages(timeStamp); + publishLeftRawAndRgbRawImages(timeStamp); + publishLeftGrayAndRgbGrayImages(timeStamp); + publishLeftRawGrayAndRgbRawGrayImages(timeStamp); + publishRightImages(timeStamp); + publishRightRawImages(timeStamp); + publishRightGrayImages(timeStamp); + publishRightRawGrayImages(timeStamp); + publishStereoImages(timeStamp); + publishStereoRawImages(timeStamp); + publishDepthImage(timeStamp); + publishConfidenceMap(timeStamp); + publishDisparityImage(timeStamp); + publishDepthInfo(timeStamp); + + + mVideoDepthElabMean_sec->addValue(vdElabTimer.toc()); + + out_pub_ts = timeStamp; + + DEBUG_VD("=== Video and Depth topics published === "); +} + +// Helper functions for publishVideoDepth + +void ZedCamera::checkRgbDepthSync() +{ + sl::Timestamp ts_rgb = 0; + sl::Timestamp ts_depth = 0; + + if (mRgbSubscribed && (mDepthSubCount > 0 || mDepthInfoSubCount > 0)) { + ts_rgb = mMatLeft.timestamp; + ts_depth = mMatDepth.timestamp; + + if (mRgbSubscribed && + (ts_rgb.data_ns != 0 && (ts_depth.data_ns != ts_rgb.data_ns))) + { + RCLCPP_WARN_STREAM( + get_logger(), + " !!!!! DEPTH/RGB ASYNC !!!!! 
- Delta: " + << 1e-9 * static_cast(ts_depth - ts_rgb) + << " sec"); + RCLCPP_WARN( + get_logger(), + " NOTE: this should never happen, please contact the node " + "maintainer in case you get this warning."); + } + } +} + +bool ZedCamera::checkGrabAndUpdateTimestamp(rclcpp::Time & out_pub_ts) +{ + if (mSdkGrabTS.getNanoseconds() == mLastTs_grab.getNanoseconds()) { + out_pub_ts = TIMEZERO_ROS; + DEBUG_VD(" * publishVideoDepth: ignoring not update data"); + DEBUG_STREAM_VD( + " * Latest Ts: " << mLastTs_grab.getNanoseconds() << " - New Ts: " << + mSdkGrabTS.getNanoseconds()); + return false; + } + + if (mSdkGrabTS.data_ns != 0) { + if (!mSvoMode) { + double period_sec = + static_cast(mSdkGrabTS.data_ns - mLastTs_grab.data_ns) / 1e9; + DEBUG_STREAM_VD( + " * VIDEO/DEPTH PUB LAST PERIOD: " + << period_sec << " sec @" << 1. / period_sec << " Hz / Expected: " << 1. / mVdPubRate << " sec @" << mVdPubRate << + " Hz"); + + mVideoDepthPeriodMean_sec->addValue(period_sec); + DEBUG_STREAM_VD( + " * VIDEO/DEPTH PUB MEAN PERIOD: " + << mVideoDepthPeriodMean_sec->getAvg() << " sec @" + << 1. / mVideoDepthPeriodMean_sec->getAvg() << " Hz / Expected: " << 1. / mVdPubRate << " sec @" << mVdPubRate << + " Hz"); + mLastTs_grab = mSdkGrabTS; + } + } + + if (mSvoMode) { + out_pub_ts = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else if (mSimMode) { + if (mUseSimTime) { + out_pub_ts = get_clock()->now(); + } else { + out_pub_ts = mUsePubTimestamps ? get_clock()->now() : + sl_tools::slTime2Ros(mZed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + } else { + out_pub_ts = mUsePubTimestamps ? 
get_clock()->now() : sl_tools::slTime2Ros( + mSdkGrabTS, + get_clock()->get_clock_type()); + } + return true; +} + +void ZedCamera::publishLeftAndRgbImages(const rclcpp::Time & t) +{ + if (mLeftSubCount > 0) { + DEBUG_STREAM_VD(" * mLeftSubCount: " << mLeftSubCount); + + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeft, mPubLeft, mPubLeftCamInfo, mPubLeftCamInfoTrans, + mLeftCamInfoMsg, mLeftCamOptFrameId, t); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeft, mNitrosPubLeft, mPubLeftCamInfo, mPubLeftCamInfoTrans, mLeftCamInfoMsg, + mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubLeftCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubLeftCamInfoTrans, mLeftCamInfoMsg, t); + } + + if (mRgbSubCount > 0) { + DEBUG_STREAM_VD(" * mRgbSubCount: " << mRgbSubCount); + + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeft, mPubRgb, mPubRgbCamInfo, mPubRgbCamInfoTrans, mLeftCamInfoMsg, + mLeftCamOptFrameId, t); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeft, mNitrosPubRgb, mPubRgbCamInfo, mPubRgbCamInfoTrans, mLeftCamInfoMsg, + mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRgbCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubRgbCamInfoTrans, mLeftCamInfoMsg, t); + } +} + +void ZedCamera::publishLeftRawAndRgbRawImages(const rclcpp::Time & t) +{ + if (mLeftRawSubCount > 0) { + DEBUG_STREAM_VD(" * mLeftRawSubCount: " << mLeftRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftRaw, + mPubRawLeft, + mPubRawLeftCamInfo, + mPubRawLeftCamInfoTrans, + mLeftCamInfoRawMsg, + mLeftCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftRaw, mNitrosPubRawLeft, mPubRawLeftCamInfo, mPubRawLeftCamInfoTrans, + mLeftCamInfoRawMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawLeftCamInfo, mLeftCamInfoRawMsg, t); + publishCameraInfo(mPubRawLeftCamInfoTrans, mLeftCamInfoRawMsg, 
t); + } + + if (mRgbRawSubCount > 0) { + DEBUG_STREAM_VD(" * mRgbRawSubCount: " << mRgbRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftRaw, mPubRawRgb, mPubRawRgbCamInfo, mPubRawRgbCamInfoTrans, + mLeftCamInfoRawMsg, mLeftCamOptFrameId, t); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftRaw, mNitrosPubRawRgb, mPubRawRgbCamInfo, mPubRawRgbCamInfoTrans, + mLeftCamInfoRawMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawRgbCamInfo, mLeftCamInfoRawMsg, t); + publishCameraInfo(mPubRawRgbCamInfoTrans, mLeftCamInfoRawMsg, t); + } +} + +void ZedCamera::publishLeftGrayAndRgbGrayImages(const rclcpp::Time & t) +{ + if (mLeftGraySubCount > 0) { + DEBUG_STREAM_VD(" * mLeftGraySubCount: " << mLeftGraySubCount); + + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftGray, + mPubLeftGray, + mPubLeftGrayCamInfo, + mPubLeftGrayCamInfoTrans, + mLeftCamInfoMsg, + mLeftCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftGray, mNitrosPubLeftGray, mPubLeftGrayCamInfo, mPubLeftGrayCamInfoTrans, + mLeftCamInfoMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubLeftGrayCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubLeftGrayCamInfoTrans, mLeftCamInfoMsg, t); + } + + if (mRgbGraySubCount > 0) { + DEBUG_STREAM_VD(" * mRgbGraySubCount: " << mRgbGraySubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftGray, + mPubRgbGray, + mPubRgbGrayCamInfo, + mPubRgbGrayCamInfoTrans, + mLeftCamInfoMsg, + mLeftCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftGray, mNitrosPubRgbGray, mPubRgbGrayCamInfo, mPubRgbGrayCamInfoTrans, + mLeftCamInfoMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRgbGrayCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubRgbGrayCamInfoTrans, mLeftCamInfoMsg, t); + } +} + +void 
ZedCamera::publishLeftRawGrayAndRgbRawGrayImages(const rclcpp::Time & t) +{ + if (mLeftGrayRawSubCount > 0) { + DEBUG_STREAM_VD(" * mLeftGrayRawSubCount: " << mLeftGrayRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftRawGray, + mPubRawLeftGray, + mPubRawLeftGrayCamInfo, + mPubRawLeftGrayCamInfoTrans, + mLeftCamInfoRawMsg, + mLeftCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftRawGray, mNitrosPubRawLeftGray, mPubRawLeftGrayCamInfo, mPubRawLeftGrayCamInfoTrans, + mLeftCamInfoRawMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawLeftGrayCamInfo, mLeftCamInfoRawMsg, t); + publishCameraInfo(mPubRawLeftGrayCamInfoTrans, mLeftCamInfoRawMsg, t); + } + + if (mRgbGrayRawSubCount > 0) { + DEBUG_STREAM_VD(" * mRgbGrayRawSubCount: " << mRgbGrayRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatLeftRawGray, + mPubRawRgbGray, + mPubRawRgbGrayCamInfo, + mPubRawRgbGrayCamInfoTrans, + mLeftCamInfoRawMsg, + mLeftCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatLeftRawGray, mNitrosPubRawRgbGray, mPubRawRgbGrayCamInfo, mPubRawRgbGrayCamInfoTrans, + mLeftCamInfoRawMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawRgbGrayCamInfo, mLeftCamInfoRawMsg, t); + publishCameraInfo(mPubRawRgbGrayCamInfoTrans, mLeftCamInfoRawMsg, t); + } +} + +void ZedCamera::publishRightImages(const rclcpp::Time & t) +{ + if (mRightSubCount > 0) { + DEBUG_STREAM_VD(" * mRightSubCount: " << mRightSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatRight, mPubRight, mPubRightCamInfo, mPubRightCamInfoTrans, + mRightCamInfoMsg, mRightCamOptFrameId, t); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatRight, mNitrosPubRight, mPubRightCamInfo, mPubRightCamInfoTrans, + mRightCamInfoMsg, mRightCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRightCamInfo, mRightCamInfoMsg, 
t); + publishCameraInfo(mPubRightCamInfoTrans, mRightCamInfoMsg, t); + } +} + +void ZedCamera::publishRightRawImages(const rclcpp::Time & t) +{ + if (mRightRawSubCount > 0) { + DEBUG_STREAM_VD(" * mRightRawSubCount: " << mRightRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatRightRaw, + mPubRawRight, + mPubRawRightCamInfo, + mPubRawRightCamInfoTrans, + mRightCamInfoRawMsg, + mRightCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatRightRaw, mNitrosPubRawRight, mPubRawRightCamInfo, mPubRawRightCamInfoTrans, + mRightCamInfoRawMsg, mRightCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawRightCamInfo, mRightCamInfoRawMsg, t); + publishCameraInfo(mPubRawRightCamInfoTrans, mRightCamInfoRawMsg, t); + } +} + +void ZedCamera::publishRightGrayImages(const rclcpp::Time & t) +{ + if (mRightGraySubCount > 0) { + DEBUG_STREAM_VD(" * mRightGraySubCount: " << mRightGraySubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatRightGray, + mPubRightGray, + mPubRightGrayCamInfo, + mPubRightGrayCamInfoTrans, + mRightCamInfoMsg, + mRightCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatRightGray, mNitrosPubRightGray, mPubRightGrayCamInfo, mPubRightGrayCamInfoTrans, + mRightCamInfoMsg, mRightCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRightGrayCamInfo, mRightCamInfoMsg, t); + publishCameraInfo(mPubRightGrayCamInfoTrans, mRightCamInfoMsg, t); + } +} + +void ZedCamera::publishRightRawGrayImages(const rclcpp::Time & t) +{ + if (mRightGrayRawSubCount > 0) { + DEBUG_STREAM_VD(" * mRightGrayRawSubCount: " << mRightGrayRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatRightRawGray, + mPubRawRightGray, + mPubRawRightGrayCamInfo, + mPubRawRightGrayCamInfoTrans, + mRightCamInfoRawMsg, + mRightCamOptFrameId, + t + ); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatRightRawGray, 
mNitrosPubRawRightGray, mPubRawRightGrayCamInfo, + mPubRawRightGrayCamInfoTrans, + mRightCamInfoRawMsg, mRightCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubRawRightGrayCamInfo, mRightCamInfoRawMsg, t); + publishCameraInfo(mPubRawRightGrayCamInfoTrans, mRightCamInfoRawMsg, t); + } +} + +void ZedCamera::publishStereoImages(const rclcpp::Time & t) +{ + if (mStereoSubCount > 0) { + DEBUG_STREAM_VD(" * mStereoSubCount: " << mStereoSubCount); + auto combined = sl_tools::imagesToROSmsg( + mMatLeft, mMatRight, mCenterFrameId, t, mUsePubTimestamps); + DEBUG_STREAM_VD(" * Publishing SIDE-BY-SIDE message"); + try { + mPubStereo.publish(std::move(combined)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishStereoRawImages(const rclcpp::Time & t) +{ + if (mStereoRawSubCount > 0) { + DEBUG_STREAM_VD(" * mStereoRawSubCount: " << mStereoRawSubCount); + auto combined = sl_tools::imagesToROSmsg( + mMatLeftRaw, mMatRightRaw, mCenterFrameId, t, mUsePubTimestamps); + DEBUG_STREAM_VD(" * Publishing SIDE-BY-SIDE RAW message"); + try { + mPubRawStereo.publish(std::move(combined)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishDepthImage(const rclcpp::Time & t) +{ + if (mDepthSubCount > 0) { + publishDepthMapWithInfo(mMatDepth, t); + } else { + publishCameraInfo(mPubDepthCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubDepthCamInfoTrans, mLeftCamInfoMsg, t); + } +} + +void ZedCamera::publishConfidenceMap(const rclcpp::Time & t) +{ + if (mConfMapSubCount > 0) { + DEBUG_STREAM_VD(" * mConfMapSubCount: " << mConfMapSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + mMatConf, mPubConfMap, mPubConfMapCamInfo, mPubConfMapCamInfoTrans, + mLeftCamInfoMsg, mLeftCamOptFrameId, t); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + mMatConf, mNitrosPubConfMap, mPubConfMapCamInfo, mPubConfMapCamInfoTrans, + mLeftCamInfoMsg, mLeftCamOptFrameId, t); +#endif + } + } else { + publishCameraInfo(mPubConfMapCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubConfMapCamInfoTrans, mLeftCamInfoMsg, t); + } +} + +void ZedCamera::publishDisparityImage(const rclcpp::Time & t) +{ + if (mDisparitySubCount > 0) { + publishDisparity(mMatDisp, t); + } +} + +void ZedCamera::publishDepthInfo(const rclcpp::Time & t) +{ + if (mDepthInfoSubCount > 0) { + auto depthInfoMsg = std::make_unique(); + depthInfoMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : t; + depthInfoMsg->header.frame_id = mDepthOptFrameId; + depthInfoMsg->min_depth = mMinDepth; + depthInfoMsg->max_depth = mMaxDepth; + + DEBUG_STREAM_VD(" * Publishing DEPTH INFO message"); + try { + if (mPubDepthInfo) { + mPubDepthInfo->publish(std::move(depthInfoMsg)); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } + } +} + +void ZedCamera::publishCameraInfo( + const camInfoPub & infoPub, + camInfoMsgPtr & camInfoMsg, + const rclcpp::Time & t) +{ + auto ts = mUsePubTimestamps ? get_clock()->now() : t; + camInfoMsg->header.stamp = ts; + + if (infoPub) { + if (count_subscribers(infoPub->get_topic_name()) > 0) { + infoPub->publish(*camInfoMsg); + DEBUG_STREAM_VD(" * Camera Info message published: " << infoPub->get_topic_name()); + DEBUG_STREAM_VD(" * Timestamp: " << ts.nanoseconds() << " nsec"); + } + } +} + +void ZedCamera::publishImageWithInfo( + const sl::Mat & img, + const image_transport::Publisher & pubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t) +{ + auto image = sl_tools::imageToROSmsg(img, imgFrameId, t, mUsePubTimestamps); + DEBUG_STREAM_VD( + " * Publishing IMAGE message: " << (mUsePubTimestamps ? get_clock()->now() : t).nanoseconds() << + " nsec"); + try { + publishCameraInfo(infoPub, camInfoMsg, image->header.stamp); + publishCameraInfo(infoPubTrans, camInfoMsg, image->header.stamp); + pubImg.publish(std::move(image)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } +} + +#ifdef FOUND_ISAAC_ROS_NITROS +void ZedCamera::publishImageWithInfo( + const sl::Mat & img, + const nitrosImgPub & nitrosPubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t) +{ + DEBUG_STREAM_VD(" * Publishing NITROS IMAGE message: " << t.nanoseconds() << " nsec"); + try { + size_t dpitch = img.getWidthBytes(); + size_t spitch = img.getStepBytes(sl::MEM::GPU); // SL Mat can be padded + + DEBUG_NITROS( + " * Nitros Image publish - Width: %zu Height: %zu PixelBytes: %zu " + "Spitch: %zu Dpitch: %zu", + img.getWidth(), img.getHeight(), img.getPixelBytes(), spitch, dpitch); + + size_t dbuffer_size{spitch * img.getHeight()}; + void * dbuffer; + CUDA_CHECK(cudaMalloc(&dbuffer, dbuffer_size)); + + DEBUG_NITROS("Sent CUDA Image buffer with memory at: %p", dbuffer); + + // Copy data bytes to CUDA buffer + CUDA_CHECK( + cudaMemcpy2D( + dbuffer, + dpitch, + img.getPtr(sl::MEM::GPU), + spitch, + img.getWidth() * img.getPixelBytes(), img.getHeight(), + cudaMemcpyDeviceToDevice)); + + // Adding header data + std_msgs::msg::Header header; + header.stamp = mUsePubTimestamps ? 
get_clock()->now() : t; + header.frame_id = imgFrameId; + + auto encoding = img_encodings::BGRA8; // Default encoding + if (img.getDataType() == sl::MAT_TYPE::U8_C1) { + encoding = img_encodings::MONO8; // Mono image + } else if (img.getDataType() == sl::MAT_TYPE::U8_C3) { + encoding = img_encodings::BGR8; // BGR image + } else if (img.getDataType() == sl::MAT_TYPE::F32_C1) { + encoding = img_encodings::TYPE_32FC1; // Float image + } + + // Create NitrosImage wrapping CUDA buffer + nvidia::isaac_ros::nitros::NitrosImage nitros_image = + nvidia::isaac_ros::nitros::NitrosImageBuilder() + .WithHeader(header) + .WithEncoding(encoding) + .WithDimensions(img.getHeight(), img.getWidth()) + .WithGpuData(dbuffer) + //.WithGpuData(img.getPtr(sl::MEM::GPU)) // TODO: Enable direct GPU memory sharing when supported by Isaac ROS. + .Build(); + + if (nitrosPubImg) { + nitrosPubImg->publish(nitros_image); + } + publishCameraInfo(infoPub, camInfoMsg, header.stamp); + publishCameraInfo(infoPubTrans, camInfoMsg, header.stamp); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } +} +#endif + +void ZedCamera::publishDepthMapWithInfo( + const sl::Mat & depth, + const rclcpp::Time & t) +{ + mLeftCamInfoMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : t; + + if (_nitrosDisabled) { + if (!mOpenniDepthMode) { + auto depth_img = sl_tools::imageToROSmsg(depth, mDepthOptFrameId, t, mUsePubTimestamps); + DEBUG_STREAM_VD( + " * Publishing DEPTH message: " << t.nanoseconds() + << " nsec"); + try { + mPubDepth.publish(std::move(depth_img)); + publishCameraInfo(mPubDepthCamInfo, mLeftCamInfoMsg, t); + publishCameraInfo(mPubDepthCamInfoTrans, mLeftCamInfoMsg, t); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } + return; + } + + // OPENNI CONVERSION (meter -> millimeters - float32 -> uint16) + auto openniDepthMsg = std::make_unique(); + + openniDepthMsg->header.stamp = mUsePubTimestamps ? get_clock()->now() : t; + openniDepthMsg->header.frame_id = mDepthOptFrameId; + openniDepthMsg->height = depth.getHeight(); + openniDepthMsg->width = depth.getWidth(); + + int num = 1; // for endianness detection + openniDepthMsg->is_bigendian = !(*reinterpret_cast(&num) == 1); + + openniDepthMsg->step = openniDepthMsg->width * sizeof(uint16_t); + openniDepthMsg->encoding = sensor_msgs::image_encodings::MONO16; + + size_t size = openniDepthMsg->step * openniDepthMsg->height; + openniDepthMsg->data.resize(size); + + uint16_t * data = reinterpret_cast(&openniDepthMsg->data[0]); + + int dataSize = openniDepthMsg->width * openniDepthMsg->height; + sl::float1 * depthDataPtr = depth.getPtr(); + + for (int i = 0; i < dataSize; i++) { + *(data++) = static_cast( + std::round(*(depthDataPtr++) * 1000)); // in mm, rounded + } + + DEBUG_STREAM_VD(" * Publishing OPENNI DEPTH message"); + try { + mPubDepth.publish(std::move(openniDepthMsg)); + publishCameraInfo(mPubDepthCamInfo, mLeftCamInfoMsg, t); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + DEBUG_STREAM_VD(" * Publishing NITROS DEPTH IMAGE message: " << t.nanoseconds() << " nsec"); + try { + size_t dpitch = depth.getWidthBytes(); + size_t spitch = depth.getStepBytes(sl::MEM::GPU); // SL Mat can be padded + + size_t dbuffer_size{dpitch * depth.getHeight()}; + void * dbuffer; + CUDA_CHECK(cudaMalloc(&dbuffer, dbuffer_size)); + + DEBUG_NITROS("Sent Depth CUDA buffer with memory at: %p", dbuffer); + + // Copy data bytes to CUDA buffer + CUDA_CHECK( + cudaMemcpy2D( + dbuffer, + dpitch, + depth.getPtr(sl::MEM::GPU), + spitch, + depth.getWidth() * depth.getPixelBytes(), depth.getHeight(), + cudaMemcpyDeviceToDevice)); + + // Adding header data + std_msgs::msg::Header header; + header.stamp = mUsePubTimestamps ? get_clock()->now() : t; + header.frame_id = mDepthOptFrameId; + + // Create NitrosImage wrapping CUDA buffer + nvidia::isaac_ros::nitros::NitrosImage nitros_image = + nvidia::isaac_ros::nitros::NitrosImageBuilder() + .WithHeader(header) + .WithEncoding(img_encodings::TYPE_32FC1) + .WithDimensions(depth.getHeight(), depth.getWidth()) + .WithGpuData(dbuffer) + //.WithGpuData(depth.getPtr(sl::MEM::GPU)) // TODO: Enable direct GPU memory sharing when supported by Isaac ROS. + .Build(); + + if (mNitrosPubDepth) { + mNitrosPubDepth->publish(nitros_image); + } + publishCameraInfo(mPubDepthCamInfo, mLeftCamInfoMsg, t); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } +#endif + } +} + +void ZedCamera::publishDisparity( + const sl::Mat & disparity, + const rclcpp::Time & t) +{ + sl::CameraInformation zedParam = mZed->getCameraInformation(mMatResol); + + std::unique_ptr disparity_image = + sl_tools::imageToROSmsg(disparity, mDepthOptFrameId, t, mUsePubTimestamps); + + auto disparityMsg = std::make_unique(); + disparityMsg->image = *disparity_image.get(); + disparityMsg->header = disparityMsg->image.header; + disparityMsg->f = + zedParam.camera_configuration.calibration_parameters.left_cam.fx; + disparityMsg->t = zedParam.camera_configuration.calibration_parameters + .getCameraBaseline(); + disparityMsg->min_disparity = + disparityMsg->f * disparityMsg->t / + mZed->getInitParameters().depth_maximum_distance; + disparityMsg->max_disparity = + disparityMsg->f * disparityMsg->t / + mZed->getInitParameters().depth_minimum_distance; + + DEBUG_STREAM_VD(" * Publishing DISPARITY message"); + try { + if (mPubDisparity) { + mPubDisparity->publish(std::move(disparityMsg)); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } +} + +void ZedCamera::processPointCloud() +{ + DEBUG_PC("=== Process Point Cloud ==="); + + if (isPointCloudSubscribed()) { + // Run the point cloud conversion asynchronously to avoid slowing down + // all the program + // Retrieve raw pointCloud data if latest Pointcloud is ready + DEBUG_PC(" * [processPointCloud] pc_lock -> defer"); + std::unique_lock pc_lock(mPcMutex, std::defer_lock); + + DEBUG_PC(" * [processPointCloud] pc_lock -> try_lock"); + if (pc_lock.try_lock()) { + DEBUG_STREAM_PC( + " * [processPointCloud] Retrieving point cloud size: " << mPcResol.width << "x" << + mPcResol.height); + auto pc_err = mZed->retrieveMeasure( + mMatCloud, sl::MEASURE::XYZBGRA, sl::MEM::CPU, + mPcResol); + if (pc_err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), + "Point cloud retrieve error: " << sl::toString(pc_err)); + return; + } + DEBUG_STREAM_PC( + " * [processPointCloud] Retrieved point cloud size: " << mMatCloud.getWidth() << "x" << + mMatCloud.getHeight()); + + // Signal Pointcloud thread that a new pointcloud is ready + mPcDataReadyCondVar.notify_one(); + mPcDataReady = true; + mPcPublishing = true; + + DEBUG_STREAM_PC( + " * [processPointCloud] Extracted point cloud: " << mMatCloud.getInfos().c_str() ); + } else { + DEBUG_PC(" * [processPointCloud] pc_lock not locked"); + } + } else { + mPcPublishing = false; + DEBUG_PC(" * [processPointCloud] No point cloud subscribers"); + } + + DEBUG_PC("=== Process Point Cloud done ==="); +} + +bool ZedCamera::isPointCloudSubscribed() +{ + size_t cloudSubCount = 0; + try { +#ifdef FOUND_POINT_CLOUD_TRANSPORT + cloudSubCount = mPubCloud.getNumSubscribers(); +#else + if (mPubCloud) { + cloudSubCount = count_subscribers(mPubCloud->get_topic_name()); + } +#endif + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_PC( + " * [isPointCloudSubscribed] Exception while counting point cloud " + "subscribers"); + return false; + } + + return cloudSubCount > 0; +} + +void ZedCamera::publishPointCloud() +{ + sl_tools::StopWatch pcElabTimer(get_clock()); + + int width = mPcResol.width; + int height = mPcResol.height; + + int ptsCount = width * height; + + rclcpp::Time stamp; + if (mSvoMode) { + stamp = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else if (mSimMode) { + if (mUseSimTime) { + stamp = mUsePubTimestamps ? get_clock()->now() : mFrameTimestamp; + } else { + stamp = mUsePubTimestamps ? get_clock()->now() : sl_tools::slTime2Ros( + mMatCloud.timestamp); + } + } else { + stamp = mUsePubTimestamps ? get_clock()->now() : sl_tools::slTime2Ros( + mMatCloud.timestamp); + } + + // ---> Check that timestamp is not the same of the latest + // published pointcloud. Avoid publishing the same old data. + if (mLastTs_pc == stamp) { + DEBUG_STREAM_PC(" * [publishPointCloud] ignoring not update data"); + return; + } + mLastTs_pc = stamp; + // <--- Check timestamp + + // Resize the reusable message buffer only when resolution changes + if (static_cast(mPcMsg.width) != width || + static_cast(mPcMsg.height) != height) + { + mPcMsg.header.frame_id = mPointCloudFrameId; + + int val = 1; + mPcMsg.is_bigendian = !(*reinterpret_cast(&val) == 1); + mPcMsg.is_dense = false; + + mPcMsg.width = width; + mPcMsg.height = height; + + sensor_msgs::PointCloud2Modifier modifier(mPcMsg); + modifier.setPointCloud2Fields( + 4, "x", 1, sensor_msgs::msg::PointField::FLOAT32, "y", 1, + sensor_msgs::msg::PointField::FLOAT32, "z", 1, + sensor_msgs::msg::PointField::FLOAT32, "rgb", 1, + sensor_msgs::msg::PointField::FLOAT32); + } + + mPcMsg.header.stamp = stamp; + + sl::Vector4 * cpu_cloud = mMatCloud.getPtr(); + + // Data copy into reused buffer + float * ptCloudPtr = reinterpret_cast(&mPcMsg.data[0]); + memcpy( + ptCloudPtr, reinterpret_cast(cpu_cloud), + 
ptsCount * 4 * sizeof(float)); + + // Pointcloud publishing + DEBUG_PC(" * [publishPointCloud] Publishing POINT CLOUD message"); +#ifdef FOUND_POINT_CLOUD_TRANSPORT + try { + mPubCloud.publish(mPcMsg); + } catch (std::system_error & e) { + DEBUG_STREAM_PC(" * [publishPointCloud] Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_PC(" * [publishPointCloud] Message publishing generic exception"); + } +#else + try { + if (mPubCloud) { + mPubCloud->publish(mPcMsg); + } + } catch (std::system_error & e) { + DEBUG_STREAM_PC(" * [publishPointCloud] Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_PC(" * [publishPointCloud] Message publishing generic exception"); + } +#endif + + // Publish freq calculation + double mean = mPcPeriodMean_sec->addValue(mPcFreqTimer.toc()); + mPcFreqTimer.tic(); + + // Point cloud elaboration time + mPcProcMean_sec->addValue(pcElabTimer.toc()); + DEBUG_STREAM_PC(" * [publishPointCloud] Point cloud freq: " << 1. 
/ mean); +} + +void ZedCamera::threadFunc_videoDepthElab() +{ + DEBUG_STREAM_VD("Video Depth thread started"); + + // Set the name of the videoDepthElab thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_videoDepthElab")).c_str()); + + setupVideoDepthThread(); + + mVdDataReady = false; + std::unique_lock lock(mVdMutex); + + while (1) { + if (!rclcpp::ok()) { + DEBUG_VD(" * [threadFunc_videoDepthElab] Ctrl+C received: stopping video depth thread"); + break; + } + if (mThreadStop) { + DEBUG_VD( + " * [threadFunc_videoDepthElab] Video/Depth thread stopped"); + break; + } + + if (!waitForVideoDepthData(lock)) { + break; + } + + handleVideoDepthPublishing(); + + mVdDataReady = false; + } + + DEBUG_STREAM_VD("Video/Depth thread finished"); +} + +// Helper: Setup thread scheduling and debug info +void ZedCamera::setupVideoDepthThread() +{ + if (mChangeThreadSched) { + DEBUG_STREAM_ADV("Video/Depth thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default Video/Depth thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + sched_param par; + par.sched_priority = + (mThreadSchedPolicy == "SCHED_FIFO" || mThreadSchedPolicy == "SCHED_RR") ? + mThreadPrioPointCloud : + 0; + + int sched_policy = SCHED_OTHER; + if (mThreadSchedPolicy == "SCHED_OTHER") { + sched_policy = SCHED_OTHER; + } else if (mThreadSchedPolicy == "SCHED_BATCH") { + sched_policy = SCHED_BATCH; + } else if (mThreadSchedPolicy == "SCHED_FIFO") { + sched_policy = SCHED_FIFO; + } else if (mThreadSchedPolicy == "SCHED_RR") { + sched_policy = SCHED_RR; + } else { + RCLCPP_WARN_STREAM( + get_logger(), + " ! 
Failed to set thread params! - Policy not supported"); + return; + } + + if (pthread_setschedparam(pthread_self(), sched_policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + + if (_debugAdvanced) { + int policy; + sched_param par2; + if (pthread_getschedparam(pthread_self(), &policy, &par2)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New Video/Depth thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par2.sched_priority); + } + } + } +} + +// Helper: Wait for video/depth data or thread stop +bool ZedCamera::waitForVideoDepthData(std::unique_lock & lock) +{ + while (!mVdDataReady) { // loop to avoid spurious wakeups + if (mVdDataReadyCondVar.wait_for(lock, std::chrono::milliseconds(500)) == + std::cv_status::timeout) + { + // Check thread stopping + if (!rclcpp::ok()) { + DEBUG_VD("[waitForVideoDepthData] Ctrl+C received: stopping video/depth thread"); + mThreadStop = true; + return false; + } + if (mThreadStop) { + DEBUG_VD( + "[waitForVideoDepthData] Video/Depth thread stopped"); + return false; + } + + DEBUG_VD(" * [waitForVideoDepthData] Waiting for Video/Depth data"); + } + } + DEBUG_VD(" * [waitForVideoDepthData] Video/Depth data ready to be published"); + DEBUG_STREAM_VD( + " * [waitForVideoDepthData] mVdMutex: " << + (lock.owns_lock() ? 
"Locked" : "Unlocked")); + return true; +} + +// Helper: Handle publishing and frequency control +void ZedCamera::handleVideoDepthPublishing() +{ + rclcpp::Time pub_ts; + publishVideoDepth(pub_ts); + + // ----> Publish sync sensors data if needed + if (mSensCameraSync) { + if (!sl_tools::isZED(mCamRealModel) && mVdPublishing && + pub_ts != TIMEZERO_ROS) + { + publishSensorsData(pub_ts); + } + } + // <---- Publish sync sensors data if needed + + // ----> Check publishing frequency + double vd_period_usec = 1e6 / mVdPubRate; + double elapsed_usec = mVdPubFreqTimer.toc() * 1e6; + + DEBUG_STREAM_VD(" * [handleVideoDepthPublishing] elapsed_usec " << elapsed_usec); + + int wait_usec = 100; + if (elapsed_usec < vd_period_usec) { + wait_usec = static_cast(vd_period_usec - elapsed_usec); + rclcpp::sleep_for(std::chrono::microseconds(wait_usec)); + DEBUG_STREAM_VD(" * [handleVideoDepthPublishing] wait_usec " << wait_usec); + } else { + rclcpp::sleep_for(std::chrono::microseconds(wait_usec)); + } + DEBUG_STREAM_VD(" * [handleVideoDepthPublishing] sleeped for " << wait_usec << " µsec"); + + mVdPubFreqTimer.tic(); + // <---- Check publishing frequency +} + +void ZedCamera::publishCameraInfos() +{ + rclcpp::Time pub_ts = get_clock()->now(); + + publishCameraInfo(mPubRgbCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRgbCamInfo, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubLeftCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawLeftCamInfo, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRightCamInfo, mRightCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRightCamInfo, mRightCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRgbGrayCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRgbGrayCamInfo, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubLeftGrayCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawLeftGrayCamInfo, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRightGrayCamInfo, mRightCamInfoMsg, pub_ts); + 
publishCameraInfo(mPubRawRightGrayCamInfo, mRightCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubDepthCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubConfMapCamInfo, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRgbCamInfoTrans, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRgbCamInfoTrans, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubLeftCamInfoTrans, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawLeftCamInfoTrans, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRightCamInfoTrans, mRightCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRightCamInfoTrans, mRightCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRgbGrayCamInfoTrans, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRgbGrayCamInfoTrans, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubLeftGrayCamInfoTrans, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawLeftGrayCamInfoTrans, mLeftCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubRightGrayCamInfoTrans, mRightCamInfoMsg, pub_ts); + publishCameraInfo(mPubRawRightGrayCamInfoTrans, mRightCamInfoRawMsg, pub_ts); + publishCameraInfo(mPubDepthCamInfoTrans, mLeftCamInfoMsg, pub_ts); + publishCameraInfo(mPubConfMapCamInfoTrans, mLeftCamInfoMsg, pub_ts); +} + +void ZedCamera::setupPointCloudThread() +{ + if (mChangeThreadSched) { + DEBUG_STREAM_ADV("Point Cloud thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default Point Cloud thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + if (mThreadSchedPolicy == "SCHED_OTHER") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_OTHER, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! 
Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_BATCH") { + sched_param par; + par.sched_priority = 0; + if (pthread_setschedparam(pthread_self(), SCHED_BATCH, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_FIFO") { + sched_param par; + par.sched_priority = mThreadPrioPointCloud; + if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else if (mThreadSchedPolicy == "SCHED_RR") { + sched_param par; + par.sched_priority = mThreadPrioPointCloud; + if (pthread_setschedparam(pthread_self(), SCHED_RR, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + } else { + RCLCPP_WARN_STREAM( + get_logger(), + " ! Failed to set thread params! - Policy not supported"); + } + + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! 
- " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New Point Cloud thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + } +} + +bool ZedCamera::waitForPointCloudData(std::unique_lock & lock) +{ + while (!mPcDataReady) { // loop to avoid spurious wakeups + if (mPcDataReadyCondVar.wait_for(lock, std::chrono::milliseconds(500)) == + std::cv_status::timeout) + { + // Check thread stopping + if (!rclcpp::ok()) { + DEBUG_PC(" * [waitForPointCloudData] Ctrl+C received: stopping point cloud thread"); + mThreadStop = true; + return false; + } + if (mThreadStop) { + DEBUG_PC( + " * [waitForPointCloudData] Point Cloud thread stopped"); + return false; + } + + DEBUG_PC(" * [waitForPointCloudData] Waiting for point cloud data"); + } + } + DEBUG_PC(" * [waitForPointCloudData] Point Cloud ready to be published"); + DEBUG_STREAM_PC( + " * [waitForPointCloudData] mPcMutex: " << + (lock.owns_lock() ? 
"Locked" : "Unlocked")); + return true; +} + +void ZedCamera::handlePointCloudPublishing() +{ + publishPointCloud(); + + // ----> Check publishing frequency + double pc_period_usec = 1e6 / mPcPubRate; + + double elapsed_usec = mPcPubFreqTimer.toc() * 1e6; + + DEBUG_STREAM_PC(" * [handlePointCloudPublishing] elapsed_usec " << elapsed_usec); + + int wait_usec = 100; + if (elapsed_usec < pc_period_usec) { + wait_usec = static_cast(pc_period_usec - elapsed_usec); + rclcpp::sleep_for(std::chrono::microseconds(wait_usec)); + DEBUG_STREAM_PC(" * [handlePointCloudPublishing] wait_usec " << wait_usec); + } else { + rclcpp::sleep_for(std::chrono::microseconds(wait_usec)); + } + DEBUG_STREAM_PC(" * [handlePointCloudPublishing] sleeped for " << wait_usec << " µsec"); + + mPcPubFreqTimer.tic(); + // <---- Check publishing frequency + + mPcDataReady = false; +} + +void ZedCamera::threadFunc_pointcloudElab() +{ + DEBUG_STREAM_PC("Point Cloud thread started"); + + // Set the name of the pointcloudElab thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_pointcloudElab")).c_str()); + + setupPointCloudThread(); + + mPcDataReady = false; + + std::unique_lock lock(mPcMutex); + + while (1) { + if (!rclcpp::ok()) { + DEBUG_PC(" * [threadFunc_pointcloudElab] Ctrl+C received: stopping point cloud thread"); + break; + } + if (mThreadStop) { + DEBUG_STREAM_PC( + " * [threadFunc_pointcloudElab] Point Cloud thread stopped"); + break; + } + + if (!waitForPointCloudData(lock)) { + break; + } + + handlePointCloudPublishing(); + } + + DEBUG_STREAM_PC("Pointcloud thread finished"); +} + +bool ZedCamera::handleVideoDepthDynamicParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + DEBUG_VD("handleVideoDepthDynamicParams"); + + // Split handling into smaller helper functions for maintainability + if (sl_tools::isZEDX(mCamRealModel)) { + if (handleGmsl2Params(param, result)) {return 
true;} + } else { + if (handleUsb3Params(param, result)) {return true;} + } + if (handleCommonVideoParams(param, result)) {return true;} + if (handleDepthParams(param, result)) {return true;} + + // If parameter was not handled, return true (no error, but not handled) + return true; +} + +// Helper for GMSL2 (ZED X) parameters +bool ZedCamera::handleGmsl2Params( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + const std::string & name = param.get_name(); + if (name == "video.exposure_time" || + name == "video.auto_exposure_time_range_min" || + name == "video.auto_exposure_time_range_max" || + name == "video.exposure_compensation" || + name == "video.analog_gain" || + name == "video.auto_analog_gain_range_min" || + name == "video.auto_analog_gain_range_max" || + name == "video.digital_gain" || + name == "video.auto_digital_gain_range_min" || + name == "video.auto_digital_gain_range_max" || + name == "video.denoising") + { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_INTEGER; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + int val = param.as_int(); + if (name == "video.exposure_time") { + mGmslExpTime = val; + mCamAutoExpGain = false; + } else if (name == "video.auto_exposure_time_range_min") { + mGmslAutoExpTimeRangeMin = val; + } else if (name == "video.auto_exposure_time_range_max") { + mGmslAutoExpTimeRangeMax = val; + } else if (name == "video.exposure_compensation") { + mGmslExposureComp = val; + } else if (name == "video.analog_gain") { + mGmslAnalogGain = val; + mCamAutoExpGain = false; + } else if (name == "video.auto_analog_gain_range_min") { + mGmslAnalogGainRangeMin = val; + } else if (name == "video.auto_analog_gain_range_max") { + mGmslAnalogGainRangeMax = val; + } else if (name == "video.digital_gain") { + 
mGmslDigitalGain = val; + mCamAutoExpGain = false; + } else if (name == "video.auto_digital_gain_range_min") { + mGmslAutoDigitalGainRangeMin = val; + } else if (name == "video.auto_digital_gain_range_max") { + mGmslAutoDigitalGainRangeMax = val; + } else if (name == "video.denoising") { + mGmslDenoising = val; + } + mCamSettingsDirty = true; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } + return false; +} + +// Helper for USB3 (ZED/ZED2) parameters +bool ZedCamera::handleUsb3Params( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + const std::string & name = param.get_name(); + if (name == "video.brightness" || name == "video.contrast" || name == "video.hue") { + if (sl_tools::isZEDX(mCamRealModel)) { + RCLCPP_WARN_STREAM( + get_logger(), + "Parameter '" << name << "' not available for " << sl::toString(mCamRealModel).c_str()); + return true; + } + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_INTEGER; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + int val = param.as_int(); + if (name == "video.brightness") { + mCamBrightness = val; + } else if (name == "video.contrast") { + mCamContrast = val; + } else if (name == "video.hue") { + mCamHue = val; + } + mCamSettingsDirty = true; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } + return false; +} + +// Helper for common video parameters +bool ZedCamera::handleCommonVideoParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + const std::string & name = param.get_name(); + if (name == "video.saturation" || name == "video.sharpness" || name == "video.gamma" || + name == "video.exposure" || name == "video.gain" || name == 
"video.whitebalance_temperature") + { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_INTEGER; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + int val = param.as_int(); + if (name == "video.saturation") { + mCamSaturation = val; + } else if (name == "video.sharpness") { + mCamSharpness = val; + } else if (name == "video.gamma") { + mCamGamma = val; + } else if (name == "video.exposure") { + mCamExposure = val; + mCamAutoExpGain = false; + } else if (name == "video.gain") { + mCamGain = val; + mCamAutoExpGain = false; + } else if (name == "video.whitebalance_temperature") { + mCamWBTemp = val * 100; + mCamAutoWB = false; + } + mCamSettingsDirty = true; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } else if (name == "video.auto_exposure_gain") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + bool val = param.as_bool(); + if (val != mCamAutoExpGain) { + mTriggerAutoExpGain = true; + } + mCamAutoExpGain = val; + mCamSettingsDirty = true; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } else if (name == "video.auto_whitebalance") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + bool val = param.as_bool(); + if (val != mCamAutoWB) { + mTriggerAutoWB = true; + } + mCamAutoWB = val; + mCamSettingsDirty 
= true; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } else if (name == "general.pub_frame_rate") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + double val = param.as_double(); + if (val < -1.0 || val > mCamGrabFrameRate) { + result.successful = false; + result.reason = name + " must be >= -1 and <= `grab_frame_rate` (0 or -1 = no limit)"; + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + if (val <= 0.0) { + val = static_cast(mCamGrabFrameRate); + } + mVdPubRate = val; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } + return false; +} + +// Helper for depth-related parameters +bool ZedCamera::handleDepthParams( + const rclcpp::Parameter & param, + rcl_interfaces::msg::SetParametersResult & result) +{ + const std::string & name = param.get_name(); + if (name == "depth.point_cloud_freq") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_DOUBLE; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + double val = param.as_double(); + if (val < -1.0 || val > mCamGrabFrameRate) { + result.successful = false; + result.reason = name + " must be >= -1 and <= `grab_frame_rate` (0 or -1 = no limit)"; + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + if (val <= 0.0) { + val = static_cast(mCamGrabFrameRate); + } + mPcPubRate = val; + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } else if (name == "depth.depth_confidence" || name == 
"depth.depth_texture_conf") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_INTEGER; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + int val = param.as_int(); + if (name == "depth.depth_confidence") { + mDepthConf = val; + } else if (name == "depth.depth_texture_conf") { + mDepthTextConf = val; + } + + DEBUG_STREAM_DYN_PARAMS("Parameter '" << name << "' correctly set to " << val); + return true; + } else if (name == "depth.remove_saturated_areas") { + rclcpp::ParameterType correctType = rclcpp::ParameterType::PARAMETER_BOOL; + if (param.get_type() != correctType) { + result.successful = false; + result.reason = name + " must be a " + rclcpp::to_string(correctType); + RCLCPP_WARN_STREAM(get_logger(), result.reason); + return true; + } + mRemoveSatAreas = param.as_bool(); + DEBUG_STREAM_DYN_PARAMS( + + "Parameter '" + << name << "' correctly set to " + << (mRemoveSatAreas ? "TRUE" : "FALSE")); + return true; + } + return false; +} + +} // namespace stereolabs diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/include/zed_camera_one_component.hpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/include/zed_camera_one_component.hpp new file mode 100644 index 000000000..74c79638a --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/include/zed_camera_one_component.hpp @@ -0,0 +1,549 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ZED_CAMERA_ONE_COMPONENT_HPP_ +#define ZED_CAMERA_ONE_COMPONENT_HPP_ + +#define ENABLE_STREAM_INPUT 1 + +#include +#include + +#include "sl_version.hpp" +#include "sl_tools.hpp" +#include "sl_types.hpp" +#include "visibility_control.hpp" + +namespace stereolabs +{ + +class ZedCameraOne : public rclcpp::Node +{ +public: + ZED_COMPONENTS_PUBLIC + explicit ZedCameraOne(const rclcpp::NodeOptions & options); + + virtual ~ZedCameraOne(); + +protected: + // ----> Initialization functions + void initNode(); + void deInitNode(); + + void initParameters(); + void initServices(); + void initTFCoordFrameNames(); + void initPublishers(); + void initVideoPublishers(); + void initSensorPublishers(); + void initializeTimestamp(); + void initializeDiagnosticStatistics(); + void initThreadsAndTimers(); + + void getSensorsParams(); + void getDebugParams(); + void getVideoParams(); + void getGeneralParams(); + void getTopicEnableParams(); + void getSvoParams(); + void getStreamParams(); + void getCameraModelParams(); + void getCameraInfoParams(); + void getResolutionParams(); + void getOpencvCalibrationParam(); + + void getStreamingServerParams(); + void getAdvancedParams(); + + bool startCamera(); + void closeCamera(); + void createZedObject(); + void logSdkVersion(); + void setupTf2(); + void configureZedInput(); + void setZedInitParams(); + bool openZedCamera(); + void processCameraInformation(); + void setupCameraInfoMessages(); + + void startTempPubTimer(); + void startHeartbeatTimer(); + bool startStreamingServer(); + void stopStreamingServer(); + 
bool startSvoRecording(std::string & errMsg); + void stopSvoRecording(); + // <---- Initialization functions + + // ----> Utility functions + void fillCamInfo( + sensor_msgs::msg::CameraInfo::SharedPtr camInfoMsg, + const std::string & frameId, bool rawParam = false); + + void applyDynamicSettings(); + void applySaturationSharpnessGamma(); + void applyWhiteBalance(); + void applyExposure(); + void applyAnalogGain(); + void applyDigitalGain(); + void applyExposureCompensationAndDenoising(); + + bool areImageTopicsSubscribed(); + bool areSensorsTopicsSubscribed(); + void retrieveImages(bool gpu); + void publishImages(); + void publishColorImage(const rclcpp::Time & timeStamp); + void publishColorRawImage(const rclcpp::Time & timeStamp); + void publishGrayImage(const rclcpp::Time & timeStamp); + void publishGrayRawImage(const rclcpp::Time & timeStamp); + void publishCameraInfos(); // Used to publish camera infos when no video/depth is subscribed + + void publishImageWithInfo( + const sl::Mat & img, + const image_transport::Publisher & pubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t); +#ifdef FOUND_ISAAC_ROS_NITROS + void publishImageWithInfo( + const sl::Mat & img, + const nitrosImgPub & nitrosPubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t); +#endif + void publishCameraInfo( + const camInfoPub & infoPub, + camInfoMsgPtr & camInfoMsg, const rclcpp::Time & t); + bool publishSensorsData(); + void publishImuFrameAndTopic(); + bool publishSvoStatus(uint64_t frame_ts); + + void updateImuFreqDiagnostics(double dT); + void publishImuMsg(const rclcpp::Time & ts_imu, const sl::SensorsData & sens_data); + void publishImuRawMsg(const rclcpp::Time & ts_imu, const sl::SensorsData & sens_data); + + void publishClock(const sl::Timestamp & ts); + + void 
updateCaptureDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateInputModeDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateImageDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateSvoDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateTfImuDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateImuDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateTemperatureDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateSvoRecordingDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + void updateStreamingServerDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat); + + void setupGrabThreadPolicy(); + void initializeGrabThreadStatus(); + bool checkGrabThreadInterruption(); + void handleDynamicSettings(); + void updateGrabFrequency(); + bool performCameraGrab(); + void updateFrameTimestamp(); + void publishSvoClock(); + void handleStreamingServer(); + void handleSvoRecordingStatus(); + void handleImageRetrievalAndPublishing(); + + void setupSensorThreadScheduling(); + bool handleSensorThreadInterruption(); + bool waitForCameraOpen(); + bool waitForSensorSubscribers(); + bool handleSensorPublishing(); + void adjustSensorPublishingFrequency(); + + bool handleDynamicVideoParam( + const rclcpp::Parameter & param, const std::string & param_name, + int & count_ok, rcl_interfaces::msg::SetParametersResult & result); + bool handleSaturationSharpnessGamma( + const rclcpp::Parameter & param, + const std::string & param_name, int & count_ok); + bool handleWhiteBalance( + const rclcpp::Parameter & param, const std::string & param_name, + int & count_ok); + bool handleExposure( + const rclcpp::Parameter & param, const std::string & param_name, + int & count_ok); + bool handleAnalogGain( + const rclcpp::Parameter & param, const std::string & param_name, + int & count_ok); + bool 
handleDigitalGain( + const rclcpp::Parameter & param, const std::string & param_name, + int & count_ok); + // <---- Utility functions + + // ----> Callbacks functions + rcl_interfaces::msg::SetParametersResult callback_dynamicParamChange( + std::vector parameters); + void callback_updateDiagnostic( + diagnostic_updater::DiagnosticStatusWrapper & stat); + void callback_pubTemp(); + void callback_pubHeartbeat(); + + void callback_enableStreaming( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_startSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_stopSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_pauseSvoInput( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + void callback_setSvoFrame( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res); + + // <---- Callbacks functions + + // ----> Thread functions + void threadFunc_zedGrab(); + void threadFunc_pubSensorsData(); + // <---- Threads functions + +private: + // ----> ZED SDK + std::shared_ptr _zed; + sl::InitParametersOne _initParams; + // <---- ZED SDK + + // ----> Threads and Timers + std::thread _grabThread; // Main grab thread + std::thread _sensThread; // Sensors data publish thread + + std::atomic _threadStop; + rclcpp::TimerBase::SharedPtr _initTimer; + rclcpp::TimerBase::SharedPtr _tempPubTimer; // Timer to retrieve and publish camera temperature + rclcpp::TimerBase::SharedPtr _heartbeatTimer; + // <---- Threads and Timers + + // ----> Thread Sync + std::mutex _recMutex; + // <---- Thread Sync + + // ----> Debug variables + bool _debugCommon = false; + bool _debugDynParams = false; + bool _debugVideoDepth = false; + bool _debugSensors = false; + bool _debugCamCtrl = false; + bool _debugStreaming = false; + bool _debugAdvanced = 
false; + bool _debugNitros = false; + bool _debugTf = false; + // If available, force disable NITROS usage for debugging and testing + // purposes; otherwise, this is always true. + bool _nitrosDisabled = false; + // <---- Debug variables + + // ----> QoS + // https://github.com/ros2/ros2/wiki/About-Quality-of-Service-Settings + rclcpp::QoS _qos; + rclcpp::PublisherOptions _pubOpt; + rclcpp::SubscriptionOptions _subOpt; + // <---- QoS + + // ----> Topics + std::string _topicRoot = "~/"; + std::string _imgColorTopic; + std::string _imgColorRawTopic; + std::string _imgGrayTopic; + std::string _imgRawGrayTopic; + + std::string _sensImuTopic; + std::string _sensImuRawTopic; + std::string _sensTempTopic; + // <---- Topics + + // ----> Publishers + // Image publishers + image_transport::Publisher _pubColorImg; + image_transport::Publisher _pubColorRawImg; + image_transport::Publisher _pubGrayImg; + image_transport::Publisher _pubGrayRawImg; + +#ifdef FOUND_ISAAC_ROS_NITROS + // Nitros image publishers with camera info + nitrosImgPub _nitrosPubColorImg; + nitrosImgPub _nitrosPubColorRawImg; + nitrosImgPub _nitrosPubGrayImg; + nitrosImgPub _nitrosPubGrayRawImg; +#endif + + // Camera Info publishers + camInfoPub _pubColorImgInfo; + camInfoPub _pubColorRawImgInfo; + camInfoPub _pubGrayImgInfo; + camInfoPub _pubGrayRawImgInfo; + camInfoPub _pubColorImgInfoTrans; // `camera_info` topic for the transported image (compressed, theora, nitros, etc) + camInfoPub _pubColorRawImgInfoTrans; // `camera_info` topic for the transported image (compressed, theora, nitros, etc) + camInfoPub _pubGrayImgInfoTrans; // `camera_info` topic for the transported image (compressed, theora, nitros, etc) + camInfoPub _pubGrayRawImgInfoTrans; // `camera_info` topic for the transported image (compressed, theora, nitros, etc) + + // Sensor publishers + imuPub _pubImu; + imuPub _pubImuRaw; + tempPub _pubTemp; + + // Camera-IMU Transform publisher + transfPub _pubCamImuTransf; + + // SVO Clock publisher + 
clockPub _pubClock; + + // SVO Status publisher + svoStatusPub _pubSvoStatus; + + // Heartbeat Status publisher + heartbeatStatusPub _pubHeartbeatStatus; + // <---- Publishers + + // ----> Publisher variables + bool _usingIPC = false; + sl::Timestamp _lastTs_grab = 0; // Used to calculate stable publish frequency + sl::Timestamp _sdkGrabTS = 0; + std::atomic _colorSubCount; + std::atomic _colorRawSubCount; + std::atomic _graySubCount = 0; + std::atomic _grayRawSubCount = 0; + + std::atomic _imuSubCount; + std::atomic _imuRawSubCount; + double _sensRateComp = 1.0; + + sl::Mat _matColor, _matColorRaw; + sl::Mat _matGray, _matGrayRaw; + // <---- Publisher variables + + // ----> Parameters + std::string _cameraName = "zed_one"; // Name of the camera + int _camGrabFrameRate = 30; // Grab frame rate + sl::RESOLUTION _camResol = sl::RESOLUTION::HD1080; // Default resolution: RESOLUTION_HD1080 + PubRes _pubResolution = PubRes::NATIVE; // Use native grab resolution by default + double _customDownscaleFactor = 1.0; // Used to rescale data with user factor + bool _cameraFlip = false; // Camera flipped? + bool _enableHDR = false; // Enable HDR if supported? 
+ std::string _opencvCalibFile; // Custom OpenCV calibration file + int _sdkVerbose = 0; // SDK verbose level + std::string _sdkVerboseLogFile = ""; // SDK Verbose Log file + int _gpuId = -1; // GPU ID + bool _usePubTimestamps = false; // Use publishing timestamp instead of grab timestamp + bool _grabOnce = false; + bool _grabImuOnce = false; + + int _camSerialNumber = 0; // Camera serial number + int _camId = -1; // Camera ID + + sl::MODEL _camUserModel = sl::MODEL::ZED_XONE_GS; // Default camera model + + //Topic enabler parameters +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + bool _24bitMode = false; +#endif + bool _publishImgRgb = true; + bool _publishImgRaw = false; + bool _publishImgGray = false; + bool _publishSensImu = true; + bool _publishSensImuRaw = false; + bool _publishSensImuTransf = false; + bool _publishSensImuTF = false; + bool _publishSensTemp = false; + + std::string _svoFilepath = ""; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + std::string _svoDecryptionKey = ""; +#endif + bool _svoLoop = false; + bool _svoRealtime = false; + int _svoFrameStart = 0; + bool _useSvoTimestamp = false; + bool _publishSvoClock = false; + bool _publishStatus = true; + + std::string _streamAddr = ""; // Address for local streaming input + int _streamPort = 10000; + + bool _changeThreadSched = false; + std::string _threadSchedPolicy; + int _threadPrioGrab; + int _threadPrioSens; + + std::atomic _streamingServerRequired; + sl::STREAMING_CODEC _streamingServerCodec = sl::STREAMING_CODEC::H264; + int _streamingServerPort = 30000; + int _streamingServerBitrate = 12500; + int _streamingServerGopSize = -1; + bool _streamingServerAdaptiveBitrate = false; + int _streamingServerChunckSize = 16084; + int _streamingServerTargetFramerate = 0; + + double _sensPubRate = 200.; + // <---- Parameters + + // ----> Dynamic params + OnSetParametersCallbackHandle::SharedPtr _paramChangeCallbackHandle; + + int _camSaturation = 4; + int _camSharpness 
= 4; + int _camGamma = 8; + bool _camAutoWB = true; + int _camWBTemp = 42; + + bool _camAutoExposure = true; + int _camExpTime = 16666; + int _camAutoExpTimeRangeMin = 28; + int _camAutoExpTimeRangeMax = 30000; + int _camExposureComp = 50; + bool _camAutoAnalogGain = true; + int _camAnalogGain = 8000; + int _camAutoAnalogGainRangeMin = 1000; + int _camAutoAnalogGainRangeMax = 16000; + bool _camAutoDigitalGain = true; + int _camDigitalGain = 128; + int _camAutoDigitalGainRangeMin = 1; + int _camAutoDigitalGainRangeMax = 256; + int _camDenoising = 50; + std::unordered_map _camDynParMapChanged; + // <---- Dynamic params + + // ----> Running status + bool _debugMode = false; // Debug mode active? + bool _svoMode = false; // Input from SVO? + std::atomic _svoPause{false}; // SVO pause status + int _svoFrameId = 0; // Current SVO frame ID + int _svoFrameCount = 0; // Total number of frames in SVO + bool _streamMode = false; // Expecting local streaming data? + + std::atomic _triggerUpdateDynParams; // Trigger auto exposure/gain + + bool _recording = false; + sl::RecordingStatus _recStatus = sl::RecordingStatus(); + + uint64_t _heartbeatCount = 0; + // <---- Running status + + // ----> Timestamps + rclcpp::Time _frameTimestamp; + rclcpp::Time _lastTs_imu; + // <---- Timestamps + + // ----> TF handling + std::unique_ptr _tfBuffer; + std::unique_ptr _tfListener; + std::unique_ptr _tfBroadcaster; + std::unique_ptr _staticTfBroadcaster; + + // Camera IMU transform + sl::Transform _slCamImuTransf; + // <---- TF handling + + // ----> Camera info + sl::MODEL _camRealModel; // Camera model requested to SDK + unsigned int _camFwVersion; // Camera FW version + unsigned int _sensFwVersion; // Sensors FW version + // <---- Camera info + + // ----> Stereolabs Mat Info + int _camWidth; // Camera frame width + int _camHeight; // Camera frame height + sl::Resolution _matResol; + // <---- Stereolabs Mat Info + + // ----> Camera infos + camInfoMsgPtr _camInfoMsg; + camInfoMsgPtr 
_camInfoRawMsg; + // <---- Camera infos + + // ----> Frame IDs + bool _staticImuTfPublished = false; + std::string _cameraLinkFrameId; + std::string _cameraCenterFrameId; + std::string _camImgFrameId; + std::string _camOptFrameId; + std::string _imuFrameId; + // <---- Frame IDs + + // ----> Diagnostic variables + diagnostic_updater::Updater _diagUpdater; // Diagnostic Updater + + sl_tools::StopWatch _uptimer; + + sl::ERROR_CODE _connStatus = sl::ERROR_CODE::LAST; // Connection status + sl::ERROR_CODE _grabStatus = sl::ERROR_CODE::LAST; // Grab status + float _tempImu = NOT_VALID_TEMP; + uint64_t _frameCount = 0; + uint32_t _svoLoopCount = 0; + + std::unique_ptr _elabPeriodMean_sec; + std::unique_ptr _grabPeriodMean_sec; + std::unique_ptr _imagePeriodMean_sec; + std::unique_ptr _imageElabMean_sec; + std::unique_ptr _imuPeriodMean_sec; + std::unique_ptr _pubImuTF_sec; + std::unique_ptr _pubImu_sec; + bool _imuPublishing = false; + bool _videoPublishing = false; + bool _imageSubscribed = false; + + sl_tools::StopWatch _grabFreqTimer; + sl_tools::StopWatch _imuFreqTimer; + sl_tools::StopWatch _imuTfFreqTimer; + sl_tools::StopWatch _imgPubFreqTimer; + int _sysOverloadCount = 0; + + std::atomic _streamingServerRunning; + // <---- Diagnostic variables + + // ----> SVO Recording parameters + unsigned int _svoRecBitrate = 0; + sl::SVO_COMPRESSION_MODE _svoRecCompression = sl::SVO_COMPRESSION_MODE::H265; + unsigned int _svoRecFramerate = 0; + bool _svoRecTranscode = false; + std::string _svoRecFilename; + // <---- SVO Recording parameters + + // ----> Services + enableStreamingPtr _srvEnableStreaming; + startSvoRecSrvPtr _srvStartSvoRec; + stopSvoRecSrvPtr _srvStopSvoRec; + pauseSvoSrvPtr _srvPauseSvo; + setSvoFramePtr _srvSetSvoFrame; + + sl_tools::StopWatch _setSvoFrameCheckTimer; + // <---- Services + + // ----> Services names + const std::string _srvEnableStreamingName = "enable_streaming"; + const std::string _srvStartSvoRecName = "start_svo_rec"; + const std::string 
_srvStopSvoRecName = "stop_svo_rec"; + const std::string _srvToggleSvoPauseName = "toggle_svo_pause"; + const std::string _srvSetSvoFrameName = "set_svo_frame"; + // <---- Services names +}; + +} // namespace stereolabs + +#endif // ZED_CAMERA_ONE_COMPONENT_HPP_ diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_main.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_main.cpp new file mode 100644 index 000000000..3c20540fb --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_main.cpp @@ -0,0 +1,2605 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "zed_camera_one_component.hpp" +#include "sl_logging.hpp" + +#include +#include + +#include + +using namespace std::chrono_literals; +using namespace std::placeholders; + +namespace stereolabs +{ + +ZedCameraOne::ZedCameraOne(const rclcpp::NodeOptions & options) +: Node("zed_node_one", options), + _threadStop(false), + _qos(QOS_QUEUE_SIZE), + _diagUpdater(this), + _grabFreqTimer(get_clock()), + _imuFreqTimer(get_clock()), + _imuTfFreqTimer(get_clock()), + _imgPubFreqTimer(get_clock()), + _frameTimestamp(TIMEZERO_ROS), + _lastTs_imu(TIMEZERO_ROS), + _colorSubCount(0), + _colorRawSubCount(0), + _graySubCount(0), + _grayRawSubCount(0), + _imuSubCount(0), + _imuRawSubCount(0), + _streamingServerRequired(false), + _streamingServerRunning(false), + _triggerUpdateDynParams(true), + _uptimer(get_clock()), + _setSvoFrameCheckTimer(get_clock()) +{ + _usingIPC = options.use_intra_process_comms(); + + RCLCPP_INFO(get_logger(), "================================"); + RCLCPP_INFO(get_logger(), " ZED Camera One Component "); + RCLCPP_INFO(get_logger(), "================================"); + RCLCPP_INFO(get_logger(), " * namespace: %s", get_namespace()); + RCLCPP_INFO(get_logger(), " * node name: %s", get_name()); + RCLCPP_INFO(get_logger(), " * IPC: %s", _usingIPC ? "enabled" : "disabled"); + RCLCPP_INFO(get_logger(), "================================"); + + // Set the name of the main thread for easier identification in + // system monitors + pthread_setname_np( + pthread_self(), + (get_name() + std::string("_main")).c_str()); + + if (((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < + (SDK_MAJOR_MIN_SUPP * 10 + SDK_MINOR_MIN_SUPP)) || + ((ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) > + (SDK_MAJOR_MAX_SUPP * 10 + SDK_MINOR_MAX_SUPP))) + { + RCLCPP_ERROR_STREAM( + get_logger(), + "This version of the ZED ROS2 wrapper is designed to work with ZED SDK " + "v" << static_cast(SDK_MAJOR_MIN_SUPP) + << "." 
<< static_cast(SDK_MINOR_MIN_SUPP) + << " or newer up to v" << static_cast(SDK_MAJOR_MAX_SUPP) + << "." << static_cast(SDK_MINOR_MAX_SUPP) << "."); + RCLCPP_INFO_STREAM( + get_logger(), "* Detected SDK v" + << ZED_SDK_MAJOR_VERSION << "." + << ZED_SDK_MINOR_VERSION << "." + << ZED_SDK_PATCH_VERSION << "-" + << ZED_SDK_BUILD_ID); + RCLCPP_INFO(get_logger(), "Node stopped. Press Ctrl+C to exit."); + exit(EXIT_FAILURE); + } + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 50 + sl::setEnvironmentVariable("ZED_SDK_DISABLE_PROGRESS_BAR_LOG", "1"); +#endif + +// ----> Publishers/Subscribers options +#ifndef FOUND_FOXY + _pubOpt.qos_overriding_options = + rclcpp::QosOverridingOptions::with_default_policies(); + _subOpt.qos_overriding_options = + rclcpp::QosOverridingOptions::with_default_policies(); +#endif + // <---- Publishers/Subscribers options + + // ----> Start a "one shot timer" to initialize the node + // This is required to make `shared_from_this` available + std::chrono::milliseconds init_msec(static_cast(50.0)); + _initTimer = create_wall_timer( + std::chrono::duration_cast(init_msec), + std::bind(&ZedCameraOne::initNode, this)); + // <---- Start a "one shot timer" to initialize the node +} + +ZedCameraOne::~ZedCameraOne() +{ + deInitNode(); + DEBUG_STREAM_COMM( + "ZED Component destroyed:" << this->get_fully_qualified_name()); +} + +void ZedCameraOne::deInitNode() +{ + DEBUG_STREAM_SENS("Stopping temperatures timer"); + if (_tempPubTimer) { + _tempPubTimer->cancel(); + } + + // ----> Verify that all the threads are not active + DEBUG_STREAM_COMM("Stopping running threads"); + if (!_threadStop) { + _threadStop = true; + } + + DEBUG_STREAM_COMM("Waiting for grab thread..."); + try { + if (_grabThread.joinable()) { + _grabThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Grab thread joining exception: " << e.what()); + } + DEBUG_STREAM_COMM("... 
grab thread stopped"); + + DEBUG_STREAM_COMM("Waiting for sensors thread..."); + try { + if (_sensThread.joinable()) { + _sensThread.join(); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Sensors thread joining exception: " << e.what()); + } + DEBUG_STREAM_COMM("... sensors thread stopped"); + // <---- Verify that all the threads are not active + + // ----> Close the ZED camera + closeCamera(); + // <---- Close the ZED camera +} + +void ZedCameraOne::closeCamera() +{ + if (_zed == nullptr) { + return; + } + + RCLCPP_INFO(get_logger(), "=== CLOSING CAMERA ==="); + _zed->close(); + _zed.reset(); + RCLCPP_INFO(get_logger(), "=== CAMERA CLOSED ==="); +} + +void ZedCameraOne::initParameters() +{ + // DEBUG parameters + getDebugParams(); + + // SVO parameters + getSvoParams(); + + // GENERAL parameters + getGeneralParams(); + + // TOPIC selection parameters + getTopicEnableParams(); + + // Image Parameters + if (!_svoMode) { + getVideoParams(); + } + + // SENSORS parameters + getSensorsParams(); + + // STREAMING SERVER parameters + getStreamingServerParams(); + + // ADVANCED parameters + getAdvancedParams(); +} + +void ZedCameraOne::getGeneralParams() +{ + rclcpp::Parameter paramVal; + RCLCPP_INFO(get_logger(), "=== GENERAL parameters ==="); + + if (!_svoMode) { + getStreamParams(); + } + getCameraModelParams(); + getCameraInfoParams(); + getResolutionParams(); + getOpencvCalibrationParam(); +} + +void ZedCameraOne::getTopicEnableParams() +{ + RCLCPP_INFO(get_logger(), "=== TOPIC selection parameters ==="); + + // General topics + sl_tools::getParam( + shared_from_this(), "general.publish_status", + _publishStatus, _publishStatus, " * Publish Status: "); + + // Image topics +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + sl_tools::getParam( + shared_from_this(), "video.enable_24bit_output", + _24bitMode, _24bitMode); + if (_24bitMode) { + RCLCPP_INFO(get_logger(), " * Image format: BGR 24-bit"); + } else { + RCLCPP_INFO(get_logger(), " * 
Image format: BGRA 32-bit"); + } +#endif + sl_tools::getParam( + shared_from_this(), "video.publish_rgb", _publishImgRgb, + _publishImgRgb, " * Publish RGB image: "); + sl_tools::getParam( + shared_from_this(), "video.publish_raw", _publishImgRaw, + _publishImgRaw, " * Publish Raw images: "); + sl_tools::getParam( + shared_from_this(), "video.publish_gray", _publishImgGray, + _publishImgGray, " * Publish Gray images: "); + + // Sensor topics + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu", _publishSensImu, + _publishSensImu, " * Publish IMU: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu_raw", _publishSensImuRaw, + _publishSensImuRaw, " * Publish IMU Raw: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_cam_imu_transf", _publishSensImuTransf, + _publishSensImuTransf, " * Publish Camera/IMU Transf.: "); + sl_tools::getParam( + shared_from_this(), "sensors.publish_temp", _publishSensTemp, + _publishSensTemp, " * Publish Temperature: "); + +} + +void ZedCameraOne::getSvoParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== SVO INPUT parameters ==="); + + sl_tools::getParam( + shared_from_this(), "svo.svo_path", std::string(), + _svoFilepath); + if (_svoFilepath.compare("live") == 0) { + _svoFilepath = ""; + } + + if (_svoFilepath == "") { + _svoMode = false; + } else { + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO: '" << _svoFilepath.c_str() << "'"); + _svoMode = true; + sl_tools::getParam( + shared_from_this(), "svo.use_svo_timestamps", + _useSvoTimestamp, _useSvoTimestamp, + " * Use SVO timestamp: "); + if (_useSvoTimestamp) { + sl_tools::getParam( + shared_from_this(), "svo.publish_svo_clock", + _publishSvoClock, _publishSvoClock, + " * Publish SVO timestamp: "); + } + + sl_tools::getParam(shared_from_this(), "svo.svo_loop", _svoLoop, _svoLoop); + if (_useSvoTimestamp) { + if (_svoLoop) { + RCLCPP_WARN( + get_logger(), + "SVO Loop is not supported when using SVO timestamps. 
Loop " + "playback disabled."); + _svoLoop = false; + } + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO Loop: " << (_svoLoop ? "TRUE" : "FALSE")); + } + sl_tools::getParam( + shared_from_this(), "svo.svo_realtime", _svoRealtime, + _svoRealtime, " * SVO Realtime: "); + + sl_tools::getParam( + shared_from_this(), "svo.play_from_frame", + _svoFrameStart, _svoFrameStart, + " * SVO start frame: ", false, 0); + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + sl_tools::getParam( + shared_from_this(), "svo.decryption_key", std::string(), + _svoDecryptionKey); +#endif + } +} + +void ZedCameraOne::getStreamParams() +{ + _streamMode = false; + if (!_svoMode) { + sl_tools::getParam(shared_from_this(), "stream.stream_address", std::string(), _streamAddr); + if (_streamAddr != "") { +#if ENABLE_STREAM_INPUT + _streamMode = true; + sl_tools::getParam(shared_from_this(), "stream.stream_port", _streamPort, _streamPort); + RCLCPP_INFO_STREAM( + get_logger(), " * Local stream input: " << _streamAddr << ":" << _streamPort); +#else + RCLCPP_ERROR_STREAM( + get_logger(), + "Local stream input is not enabled in this version. 
This feature will be available later"); + exit(EXIT_FAILURE); +#endif + } + } +} + +void ZedCameraOne::getCameraModelParams() +{ + std::string camera_model = "zed"; + sl_tools::getParam(shared_from_this(), "general.camera_model", camera_model, camera_model); + if (camera_model == "zedxonegs") { + _camUserModel = sl::MODEL::ZED_XONE_GS; + if (_svoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " << sl::toString( + _camUserModel) << " camera model."); + } else if (_streamMode) { + RCLCPP_INFO_STREAM( + get_logger(), + " + Playing a network stream from a " << sl::toString(_camUserModel) << " camera model."); + } else if (!IS_JETSON) { + RCLCPP_ERROR_STREAM( + get_logger(), "Camera model " << sl::toString( + _camUserModel).c_str() << " is available only with NVIDIA Jetson devices."); + exit(EXIT_FAILURE); + } + } else if (camera_model == "zedxone4k") { + _camUserModel = sl::MODEL::ZED_XONE_UHD; + if (_svoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " << sl::toString( + _camUserModel) << " camera model."); + } else if (_streamMode) { + RCLCPP_INFO_STREAM( + get_logger(), + " + Playing a network stream from a " << sl::toString(_camUserModel) << " camera model."); + } else if (!IS_JETSON) { + RCLCPP_ERROR_STREAM( + get_logger(), "Camera model " << sl::toString( + _camUserModel).c_str() << " is available only with NVIDIA Jetson devices."); + exit(EXIT_FAILURE); + } + } else if (camera_model == "zedxonehdr") { + _camUserModel = sl::MODEL::ZED_XONE_HDR; + if (_svoMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing an SVO for " + << sl::toString(_camUserModel) + << " camera model."); + } else if (_streamMode) { + RCLCPP_INFO_STREAM( + get_logger(), " + Playing a network stream from a " + << sl::toString(_camUserModel) + << " camera model."); + } else if (!IS_JETSON) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model " << sl::toString(_camUserModel).c_str() + << " is available only with NVIDIA Jetson devices."); + 
exit(EXIT_FAILURE); + } + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "Camera model not valid in parameter values: " << camera_model); + } + RCLCPP_INFO_STREAM(get_logger(), " * Camera model: " << camera_model << " - " << _camUserModel); +} + +void ZedCameraOne::getCameraInfoParams() +{ + sl_tools::getParam( + shared_from_this(), "general.camera_name", _cameraName, _cameraName, " * Camera name: "); + if (!_svoMode) { + sl_tools::getParam( + shared_from_this(), "general.serial_number", _camSerialNumber, _camSerialNumber, + " * Camera SN: "); + sl_tools::getParam(shared_from_this(), "general.camera_id", _camId, _camId, " * Camera ID: "); + sl_tools::getParam( + shared_from_this(), "general.grab_frame_rate", _camGrabFrameRate, _camGrabFrameRate, " * Camera framerate: ", false, 15, + 120); + } + sl_tools::getParam( + shared_from_this(), "general.gpu_id", _gpuId, _gpuId, " * GPU ID: ", false, -1, 256); +} + +void ZedCameraOne::getResolutionParams() +{ + if (!_svoMode) { + std::string resol = "AUTO"; + sl_tools::getParam(shared_from_this(), "general.grab_resolution", resol, resol); + if (resol == "AUTO") { + _camResol = sl::RESOLUTION::AUTO; + } else if (resol == "HD4K" && _camUserModel == sl::MODEL::ZED_XONE_UHD) { + _camResol = sl::RESOLUTION::HD4K; + } else if (resol == "QHDPLUS" && _camUserModel == sl::MODEL::ZED_XONE_UHD) { + _camResol = sl::RESOLUTION::QHDPLUS; + } else if (resol == "HD1536" && _camUserModel == sl::MODEL::ZED_XONE_HDR) { + _camResol = sl::RESOLUTION::HD1536; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + } else if (resol == "XVGA" && _camUserModel == sl::MODEL::ZED_XONE_HDR) { + _camResol = sl::RESOLUTION::XVGA; +#endif + } else if (resol == "HD1200" && _camUserModel != sl::MODEL::ZED_XONE_HDR) { + _camResol = sl::RESOLUTION::HD1200; + } else if (resol == "HD1080" && _camUserModel != sl::MODEL::ZED_XONE_HDR) { + _camResol = sl::RESOLUTION::HD1080; + } else if (resol == "SVGA" && _camUserModel != sl::MODEL::ZED_XONE_HDR) { + 
_camResol = sl::RESOLUTION::SVGA; + } else { + RCLCPP_WARN( + get_logger(), "Not valid 'general.grab_resolution' value: '%s'. Using 'AUTO' setting.", + resol.c_str()); + _camResol = sl::RESOLUTION::AUTO; + } + RCLCPP_INFO_STREAM(get_logger(), " * Camera resolution: " << sl::toString(_camResol).c_str()); + } + + std::string out_resol = "NATIVE"; + sl_tools::getParam(shared_from_this(), "general.pub_resolution", out_resol, out_resol); + if (out_resol == "NATIVE") { + _pubResolution = PubRes::NATIVE; + } else if (out_resol == "CUSTOM") { + _pubResolution = PubRes::CUSTOM; + } else { + RCLCPP_WARN( + get_logger(), "Not valid 'general.pub_resolution' value: '%s'. Using default setting instead.", + out_resol.c_str()); + out_resol = "NATIVE"; + _pubResolution = PubRes::NATIVE; + } + RCLCPP_INFO_STREAM(get_logger(), " * Publishing resolution: " << out_resol.c_str()); + + if (_pubResolution == PubRes::CUSTOM) { + sl_tools::getParam( + shared_from_this(), "general.pub_downscale_factor", _customDownscaleFactor, _customDownscaleFactor, " * Publishing downscale factor: ", false, 1.0, + 5.0); + } else { + _customDownscaleFactor = 1.0; + } +} + +void ZedCameraOne::getOpencvCalibrationParam() +{ + sl_tools::getParam( + shared_from_this(), "general.optional_opencv_calibration_file", _opencvCalibFile, _opencvCalibFile, + " * OpenCV custom calibration: "); +} + +void ZedCameraOne::getStreamingServerParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== STREAMING SERVER parameters ==="); + + bool stream_server = false; + sl_tools::getParam( + shared_from_this(), "stream_server.stream_enabled", + stream_server, stream_server, " * Stream enabled: "); + _streamingServerRequired = stream_server; + + std::string codec = "H264"; + sl_tools::getParam(shared_from_this(), "stream_server.codec", codec, codec); + if (codec == "H264") { + _streamingServerCodec = 
sl::STREAMING_CODEC::H264; + RCLCPP_INFO(get_logger(), " * Stream codec: H264"); + } else if (codec == "H265") { + _streamingServerCodec = sl::STREAMING_CODEC::H265; + RCLCPP_INFO(get_logger(), " * Stream codec: H265"); + } else { + _streamingServerCodec = sl::STREAMING_CODEC::H264; + RCLCPP_WARN_STREAM( + get_logger(), + "Invalid value for the parameter 'stream_server.codec': " << codec << + ". Using the default value."); + RCLCPP_INFO(get_logger(), " * Stream codec: H264"); + } + + sl_tools::getParam( + shared_from_this(), "stream_server.port", + _streamingServerPort, _streamingServerPort, + " * Stream port:", false, 1024, 65535); + sl_tools::getParam( + shared_from_this(), "stream_server.bitrate", + _streamingServerBitrate, _streamingServerBitrate, + " * Stream bitrate:", false, 1000, 60000); + sl_tools::getParam( + shared_from_this(), "stream_server.gop_size", + _streamingServerGopSize, _streamingServerGopSize, + " * Stream GOP size:", false, -1, 256); + sl_tools::getParam( + shared_from_this(), "stream_server.chunk_size", + _streamingServerChunckSize, _streamingServerChunckSize, + " * Stream Chunk size:", false, 1024, 65000); + sl_tools::getParam( + shared_from_this(), "stream_server.adaptative_bitrate", + _streamingServerAdaptiveBitrate, + _streamingServerAdaptiveBitrate, " * Adaptative bitrate:"); + sl_tools::getParam( + shared_from_this(), "stream_server.target_framerate", + _streamingServerTargetFramerate, + _streamingServerTargetFramerate, " * Target frame rate:"); +} + +void ZedCameraOne::getAdvancedParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== ADVANCED parameters ==="); + + sl_tools::getParam( + shared_from_this(), "advanced.change_thread_priority", + _changeThreadSched, _changeThreadSched, + " * Change thread priority: "); + + if (_changeThreadSched) { + sl_tools::getParam( + shared_from_this(), 
"advanced.thread_sched_policy", + _threadSchedPolicy, _threadSchedPolicy, + " * Thread sched. policy: "); + + if (_threadSchedPolicy == "SCHED_FIFO" || _threadSchedPolicy == "SCHED_RR") { + if (!sl_tools::checkRoot()) { + RCLCPP_WARN_STREAM( + get_logger(), + "'sudo' permissions required to set " + << _threadSchedPolicy + << " thread scheduling policy. Using Linux " + "default [SCHED_OTHER]"); + _threadSchedPolicy = "SCHED_OTHER"; + } else { + sl_tools::getParam( + shared_from_this(), "advanced.thread_grab_priority", + _threadPrioGrab, _threadPrioGrab, + " * Grab thread priority: "); + sl_tools::getParam( + shared_from_this(), "advanced.thread_sensor_priority", + _threadPrioSens, _threadPrioSens, + " * Sensors thread priority: "); + } + } + } +} + +void ZedCameraOne::getDebugParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== DEBUG parameters ==="); + + sl_tools::getParam( + shared_from_this(), "debug.sdk_verbose", _sdkVerbose, + _sdkVerbose, " * SDK Verbose: ", false, 0, 1000); + sl_tools::getParam( + shared_from_this(), "debug.sdk_verbose_log_file", + _sdkVerboseLogFile, _sdkVerboseLogFile, + " * SDK Verbose File: "); + sl_tools::getParam( + shared_from_this(), "debug.use_pub_timestamps", + _usePubTimestamps, _usePubTimestamps, + " * Use Pub Timestamps: "); + + sl_tools::getParam( + shared_from_this(), "debug.debug_common", _debugCommon, + _debugCommon, " * Debug Common: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_dyn_params", + _debugDynParams, _debugDynParams, + " * Debug Dynamic Parameters: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_video_depth", + _debugVideoDepth, _debugVideoDepth, + " * Debug Image/Depth: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_camera_controls", + _debugCamCtrl, _debugCamCtrl, + " * Debug Camera Controls: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_sensors", _debugSensors, + _debugSensors, " * Debug Sensors: "); + sl_tools::getParam( + 
shared_from_this(), "debug.debug_streaming", + _debugStreaming, _debugStreaming, " * Debug Streaming: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_advanced", _debugAdvanced, + _debugAdvanced, " * Debug Advanced: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_tf", _debugTf, _debugTf, + " * Debug TF: "); + sl_tools::getParam( + shared_from_this(), "debug.debug_nitros", _debugNitros, + _debugNitros, " * Debug Nitros: "); + + // Set debug mode + + _debugMode = _debugCommon || _debugDynParams || _debugVideoDepth || + _debugCamCtrl || _debugSensors || _debugStreaming || + _debugAdvanced || _debugTf || _debugNitros; + + if (_debugMode) { + rcutils_ret_t res = rcutils_logging_set_logger_level( + get_logger().get_name(), RCUTILS_LOG_SEVERITY_DEBUG); + + if (res != RCUTILS_RET_OK) { + RCLCPP_INFO(get_logger(), "Error setting DEBUG level for logger"); + } else { + RCLCPP_INFO(get_logger(), " + Debug Mode enabled +"); + } + } else { + rcutils_ret_t res = rcutils_logging_set_logger_level( + get_logger().get_name(), RCUTILS_LOG_SEVERITY_INFO); + + if (res != RCUTILS_RET_OK) { + RCLCPP_INFO(get_logger(), "Error setting INFO level for logger"); + } + } + + DEBUG_STREAM_COMM( + "[ROS2] Using RMW_IMPLEMENTATION " + << rmw_get_implementation_identifier()); + + const char * nitrosReason = "not_available"; + +#ifdef FOUND_ISAAC_ROS_NITROS + nitrosReason = "enabled"; + + sl_tools::getParam( + shared_from_this(), "debug.disable_nitros", + _nitrosDisabled, _nitrosDisabled); + + bool nitrosDisabledByParam = _nitrosDisabled; + + if (nitrosDisabledByParam) { + nitrosReason = "param_debug.disable_nitros"; + } + + if (!_nitrosDisabled && _usingIPC) { + RCLCPP_WARN( + get_logger(), + "NITROS transport is incompatible with ROS 2 Intra-Process Communication " + "(IPC). NITROS will be disabled. To use NITROS, launch with " + "enable_ipc:=false. 
NITROS provides its own zero-copy transport, " + "so disabling IPC does not reduce performance."); + _nitrosDisabled = true; + nitrosReason = "auto_disabled_ipc_incompatibility"; + } + + if (nitrosDisabledByParam) { + RCLCPP_WARN( + get_logger(), + "NITROS is available, but is disabled by 'debug.disable_nitros'"); + } +#else + _nitrosDisabled = true; // Force disable NITROS if not available +#endif + + RCLCPP_DEBUG( + get_logger(), + "Transport summary: IPC=%s, NITROS=%s, reason=%s", + _usingIPC ? "enabled" : "disabled", + _nitrosDisabled ? "disabled" : "enabled", + nitrosReason); +} + +void ZedCameraOne::initNode() +{ + // Stop the timer for "one shot" initialization + _initTimer->cancel(); + + // ----> Diagnostic initialization + std::string info = sl::toString(_camUserModel).c_str(); + _diagUpdater.add( + info, this, + &ZedCameraOne::callback_updateDiagnostic); + std::string hw_id = std::string("Stereolabs camera: ") + _cameraName; + _diagUpdater.setHardwareID(hw_id); + // <---- Diagnostic initialization + + // Parameters initialization + initParameters(); + + // Services initialization + initServices(); + + // ----> Start camera + if (!startCamera()) { + _zed.reset(); + exit(EXIT_FAILURE); + } + // <---- Start camera + + // Callback when the node is destroyed + // This is used to stop the camera when the node is destroyed + // and to stop the timers + + // Close camera callback before shutdown + using rclcpp::contexts::get_global_default_context; + get_global_default_context()->add_pre_shutdown_callback( + [this]() { + DEBUG_COMM("ZED X One Component is shutting down"); + deInitNode(); + DEBUG_COMM("ZED X One Component is shutting down - done"); + }); + + // Dynamic parameters callback + _paramChangeCallbackHandle = add_on_set_parameters_callback( + std::bind(&ZedCameraOne::callback_dynamicParamChange, this, _1)); +} + +void ZedCameraOne::initServices() +{ + RCLCPP_INFO(get_logger(), "=== SERVICES ==="); + + std::string srv_name; + std::string srv_prefix = 
"~/"; + + // Enable Streaming + srv_name = srv_prefix + _srvEnableStreamingName; + _srvEnableStreaming = create_service( + srv_name, + std::bind(&ZedCameraOne::callback_enableStreaming, this, _1, _2, _3)); + RCLCPP_INFO(get_logger(), " * '%s'", _srvEnableStreaming->get_service_name()); + + // Start SVO Recording + srv_name = srv_prefix + _srvStartSvoRecName; + _srvStartSvoRec = create_service( + srv_name, std::bind(&ZedCameraOne::callback_startSvoRec, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << _srvStartSvoRec->get_service_name() + << "'"); + // Stop SVO Recording + srv_name = srv_prefix + _srvStopSvoRecName; + _srvStopSvoRec = create_service( + srv_name, std::bind(&ZedCameraOne::callback_stopSvoRec, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << _srvStopSvoRec->get_service_name() + << "'"); + + // Pause SVO (only if the realtime playing mode is disabled) + if (_svoMode) { +#ifndef USE_SVO_REALTIME_PAUSE + if (!_svoRealtime) { +#endif + srv_name = srv_prefix + _srvToggleSvoPauseName; + _srvPauseSvo = create_service( + srv_name, + std::bind(&ZedCameraOne::callback_pauseSvoInput, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << _srvPauseSvo->get_service_name() + << "'"); +#ifndef USE_SVO_REALTIME_PAUSE + } +#endif + // Set Service for SVO frame + srv_name = srv_prefix + _srvSetSvoFrameName; + _srvSetSvoFrame = create_service( + srv_name, std::bind(&ZedCameraOne::callback_setSvoFrame, this, _1, _2, _3)); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on service: '" + << _srvSetSvoFrame->get_service_name() + << "'"); + } +} + +bool ZedCameraOne::startCamera() +{ + RCLCPP_INFO(get_logger(), "=== STARTING CAMERA ==="); + + createZedObject(); + logSdkVersion(); + setupTf2(); + configureZedInput(); + setZedInitParams(); + + if (!openZedCamera()) { + return false; + } + + _uptimer.tic(); + + // ----> Set SVO first frame if required 
+ if (_svoMode && _svoFrameStart != 0) { + int svo_frames = _zed->getSVONumberOfFrames(); + + if (_svoFrameStart > svo_frames) { + RCLCPP_ERROR_STREAM( + get_logger(), + "The SVO contains " + << svo_frames + << " frames. The requested starting frame (" + << _svoFrameStart << ") is invalid."); + return false; + } + + _zed->setSVOPosition(_svoFrameStart); + RCLCPP_WARN_STREAM( + get_logger(), + "SVO playing from frame #" << _svoFrameStart); + } + // <---- Set SVO first frame if required + + processCameraInformation(); + initTFCoordFrameNames(); + setupCameraInfoMessages(); + initPublishers(); + initializeTimestamp(); + initializeDiagnosticStatistics(); + initThreadsAndTimers(); + + return true; +} + +// Helper functions for startCamera() + +void ZedCameraOne::createZedObject() +{ + _zed = std::make_shared(); +} + +void ZedCameraOne::logSdkVersion() +{ + RCLCPP_INFO( + get_logger(), "ZED SDK Version: %d.%d.%d - Build %s", + ZED_SDK_MAJOR_VERSION, ZED_SDK_MINOR_VERSION, + ZED_SDK_PATCH_VERSION, ZED_SDK_BUILD_ID); +} + +void ZedCameraOne::setupTf2() +{ + _tfBuffer = std::make_unique(get_clock()); + _tfListener = std::make_unique(*_tfBuffer); + _tfBroadcaster = std::make_unique(this); + + _staticImuTfPublished = false; + if (!_usingIPC) { + _staticTfBroadcaster = std::make_unique(this); + } else { // Cannot use TRANSIENT_LOCAL with intra-process comms + _staticTfBroadcaster.reset(); + } +} + +void ZedCameraOne::configureZedInput() +{ + + if (!_svoFilepath.empty()) { + RCLCPP_INFO(get_logger(), "=== SVO OPENING ==="); + _initParams.input.setFromSVOFile(_svoFilepath.c_str()); + _initParams.svo_real_time_mode = _svoRealtime; +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 53 + _initParams.svo_decryption_key = _svoDecryptionKey.c_str(); +#endif + _svoMode = true; + return; + } + + if (!_streamAddr.empty()) { + RCLCPP_INFO(get_logger(), "=== LOCAL STREAMING OPENING ==="); + _initParams.input.setFromStream( + _streamAddr.c_str(), + static_cast(_streamPort)); + 
_streamMode = true; + return; + } + RCLCPP_INFO(get_logger(), "=== CAMERA OPENING ==="); + _initParams.camera_fps = _camGrabFrameRate; + _initParams.camera_resolution = static_cast(_camResol); + + if (_camSerialNumber > 0) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + _initParams.input.setFromSerialNumber(_camSerialNumber, sl::BUS_TYPE::GMSL); +#else + _initParams.input.setFromSerialNumber(_camSerialNumber); +#endif + } else if (_camId >= 0) { +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) < 51 + _initParams.input.setFromCameraID(_camId, sl::BUS_TYPE::GMSL, sl::CAMERA_TYPE::MONO); +#else + _initParams.input.setFromCameraID(_camId, sl::BUS_TYPE::GMSL); +#endif + } +} + +void ZedCameraOne::setZedInitParams() +{ + _initParams.async_grab_camera_recovery = true; + _initParams.camera_image_flip = (_cameraFlip ? sl::FLIP_MODE::ON : sl::FLIP_MODE::OFF); + _initParams.coordinate_system = ROS_COORDINATE_SYSTEM; + _initParams.coordinate_units = ROS_MEAS_UNITS; + _initParams.enable_hdr = _enableHDR; + if (!_opencvCalibFile.empty()) { + _initParams.optional_opencv_calibration_file = _opencvCalibFile.c_str(); + } + _initParams.sdk_verbose = _sdkVerbose; + _initParams.sdk_verbose_log_file = _sdkVerboseLogFile.c_str(); +} + +bool ZedCameraOne::openZedCamera() +{ + _grabStatus = sl::ERROR_CODE::LAST; + _connStatus = _zed->open(_initParams); + + if (_connStatus != sl::ERROR_CODE::SUCCESS) { + +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + if (_connStatus == sl::ERROR_CODE::DRIVER_FAILURE) { + RCLCPP_ERROR_STREAM( + get_logger(), + "ZED X Driver failure: " << sl::toVerbose(_connStatus) + << ". 
Please verify that the ZED drivers " + "are correctly installed."); + return false; + } +#endif + + if (_connStatus == sl::ERROR_CODE::INVALID_CALIBRATION_FILE) { + if (_opencvCalibFile.empty()) { + RCLCPP_ERROR_STREAM( + get_logger(), "Calibration file error: " + << sl::toVerbose(_connStatus)); + } else { + RCLCPP_ERROR_STREAM( + get_logger(), + "If you are using a custom OpenCV calibration file, please check " + "the correctness of the path of the calibration file " + "in the parameter 'general.optional_opencv_calibration_file': '" + << _opencvCalibFile << "'."); + RCLCPP_ERROR( + get_logger(), + "If the file exists, it may contain invalid information."); + } + return false; + } else if (_svoMode) { + RCLCPP_WARN( + get_logger(), "Error opening SVO: %s", + sl::toString(_connStatus).c_str()); + return false; + } else if (_streamMode) { + RCLCPP_WARN( + get_logger(), "Error opening Local Stream: %s", + sl::toString(_connStatus).c_str()); + return false; + } else { + RCLCPP_WARN( + get_logger(), "Error opening camera: %s", + sl::toString(_connStatus).c_str()); + RCLCPP_INFO(get_logger(), "Please verify the camera connection"); + return false; + } + } else { + DEBUG_STREAM_COMM("Opening successfull"); + } + return true; +} + +void ZedCameraOne::processCameraInformation() +{ + sl::CameraOneInformation camInfo = _zed->getCameraInformation(); + + float realFps = camInfo.camera_configuration.fps; + if (realFps != static_cast(_camGrabFrameRate)) { + RCLCPP_WARN_STREAM( + get_logger(), + "!!! `general.grab_frame_rate` value is not valid: '" + << _camGrabFrameRate + << "'. Automatically replaced with '" << realFps + << "'. 
Please fix the parameter !!!"); + _camGrabFrameRate = realFps; + } + + cuCtxGetDevice(&_gpuId); + RCLCPP_INFO_STREAM(get_logger(), "ZED SDK running on GPU #" << _gpuId); + + _camRealModel = camInfo.camera_model; + + if (_camRealModel != _camUserModel) { + RCLCPP_WARN(get_logger(), "Camera model does not match user parameter."); + + if (_camRealModel == sl::MODEL::ZED_XONE_GS) { + RCLCPP_WARN(get_logger(), "Please set the parameter 'general.camera_model' to 'zedxonegs'"); + } else if (_camRealModel == sl::MODEL::ZED_XONE_UHD) { + RCLCPP_WARN(get_logger(), "Please set the parameter 'general.camera_model' to 'zedxone4k'"); + } else if (_camRealModel == sl::MODEL::ZED_XONE_HDR) { + RCLCPP_WARN(get_logger(), "Please set the parameter 'general.camera_model' to 'zedxonehdr'"); + } + } + + RCLCPP_INFO_STREAM( + get_logger(), " * Camera Model -> " + << sl::toString(_camRealModel).c_str()); + _camSerialNumber = camInfo.serial_number; + RCLCPP_INFO_STREAM(get_logger(), " * Serial Number -> " << _camSerialNumber); + + // ----> Update HW ID + std::string hw_id = std::string("Stereolabs "); + hw_id += sl::toString(_camRealModel).c_str(); + hw_id += " - '" + _cameraName + "'" + " - S/N: " + std::to_string(_camSerialNumber); + _diagUpdater.setHardwareID(hw_id); + _diagUpdater.force_update(); + // <---- Update HW ID + + RCLCPP_INFO_STREAM( + get_logger(), + " * Focal Lenght -> " + << camInfo.camera_configuration.calibration_parameters.focal_length_metric + << " mm"); + + RCLCPP_INFO_STREAM( + get_logger(), + " * Input\t -> " + << sl::toString(camInfo.input_type).c_str()); + + if (_svoMode) { + RCLCPP_INFO( + get_logger(), " * SVO resolution\t-> %dx%d", + camInfo.camera_configuration.resolution.width, + camInfo.camera_configuration.resolution.height); + RCLCPP_INFO_STREAM( + get_logger(), + " * SVO framerate\t\t-> " + << (camInfo.camera_configuration.fps)); + } + + if (!_svoMode) { + _camFwVersion = camInfo.camera_configuration.firmware_version; + RCLCPP_INFO_STREAM( + get_logger(), 
+ " * Camera FW Version -> " << _camFwVersion); + _sensFwVersion = camInfo.sensors_configuration.firmware_version; + RCLCPP_INFO_STREAM( + get_logger(), + " * Sensors FW Version -> " << _sensFwVersion); + } + + _slCamImuTransf = camInfo.sensors_configuration.camera_imu_transform; + DEBUG_SENS("Camera-IMU Transform:\n%s", _slCamImuTransf.getInfos().c_str()); + + _camWidth = camInfo.camera_configuration.resolution.width; + _camHeight = camInfo.camera_configuration.resolution.height; + + RCLCPP_INFO_STREAM( + get_logger(), " * Camera grab frame size -> " + << _camWidth << "x" << _camHeight); + + int pub_w, pub_h; + pub_w = static_cast(std::round(_camWidth / _customDownscaleFactor)); + pub_h = static_cast(std::round(_camHeight / _customDownscaleFactor)); + + if (pub_w > _camWidth || pub_h > _camHeight) { + RCLCPP_WARN_STREAM( + get_logger(), "The publishing resolution (" + << pub_w << "x" << pub_h + << ") cannot be higher than the grabbing resolution (" + << _camWidth << "x" << _camHeight + << "). 
Using grab resolution for output messages."); + pub_w = _camWidth; + pub_h = _camHeight; + } + + _matResol = sl::Resolution(pub_w, pub_h); + RCLCPP_INFO_STREAM( + get_logger(), " * Publishing frame size -> " + << _matResol.width << "x" + << _matResol.height); +} + +void ZedCameraOne::initializeTimestamp() +{ + if (_svoMode) { + if (_useSvoTimestamp) { + _frameTimestamp = sl_tools::slTime2Ros(_zed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } else { + _frameTimestamp = + sl_tools::slTime2Ros(_zed->getTimestamp(sl::TIME_REFERENCE::CURRENT)); + } + } else { + _frameTimestamp = + sl_tools::slTime2Ros(_zed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } +} + +void ZedCameraOne::initializeDiagnosticStatistics() +{ + _elabPeriodMean_sec = std::make_unique(_camGrabFrameRate); + _grabPeriodMean_sec = std::make_unique(_camGrabFrameRate); + _imagePeriodMean_sec = std::make_unique(_camGrabFrameRate); + _imageElabMean_sec = std::make_unique(_camGrabFrameRate); + _imuPeriodMean_sec = std::make_unique(20); + _pubImuTF_sec = std::make_unique(_sensPubRate); + _pubImu_sec = std::make_unique(_sensPubRate); +} + +void ZedCameraOne::initThreadsAndTimers() +{ + // Start Heartbeat timer + startHeartbeatTimer(); + + // ----> Start CMOS Temperatures thread + startTempPubTimer(); + // <---- Start CMOS Temperatures thread + + // Start sensor thread + _sensThread = std::thread(&ZedCameraOne::threadFunc_pubSensorsData, this); + + // Start grab thread + _grabThread = std::thread(&ZedCameraOne::threadFunc_zedGrab, this); +} + +void ZedCameraOne::callback_updateDiagnostic( + diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + DEBUG_COMM("=== Update Diagnostic ==="); + + if (_connStatus != sl::ERROR_CODE::SUCCESS) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::ERROR, + sl::toString(_connStatus).c_str()); + return; + } + + stat.addf("Uptime", "%s", sl_tools::seconds2str(_uptimer.toc()).c_str()); + + if (_grabStatus == sl::ERROR_CODE::SUCCESS) { + updateCaptureDiagnostics(stat); + 
updateInputModeDiagnostics(stat); + updateImageDiagnostics(stat); + updateSvoDiagnostics(stat); + updateTfImuDiagnostics(stat); + } else if (_grabStatus == sl::ERROR_CODE::LAST) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::OK, + "Camera initializing"); + } else { + stat.summaryf( + diagnostic_msgs::msg::DiagnosticStatus::ERROR, + "%s", sl::toString(_grabStatus).c_str()); + } + + updateImuDiagnostics(stat); + updateTemperatureDiagnostics(stat); + updateSvoRecordingDiagnostics(stat); + updateStreamingServerDiagnostics(stat); +} + +// --- Helper functions for diagnostics --- + +void ZedCameraOne::updateCaptureDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + double freq = 1. / _grabPeriodMean_sec->getAvg(); + double freq_perc = 100. * freq / _camGrabFrameRate; + stat.addf("Capture", "Mean Frequency: %.1f Hz (%.1f%%)", freq, freq_perc); + + double frame_proc_sec = _elabPeriodMean_sec->getAvg(); + double frame_grab_period = 1. / _camGrabFrameRate; + stat.addf( + "Capture", "Tot. Processing Time: %.6f sec (Max. %.3f sec)", + frame_proc_sec, frame_grab_period); + + if (frame_proc_sec > frame_grab_period) { + _sysOverloadCount++; + } + + if (_sysOverloadCount >= 10) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "System overloaded. Consider reducing " + "'general.grab_frame_rate' or 'general.grab_resolution'"); + } else { + _sysOverloadCount = 0; + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::OK, + "Camera grabbing"); + } + + // ----> Frame drop count + if (_zed && _zed->isOpened()) { + auto dropped = _zed->getFrameDroppedCount(); + uint64_t total = dropped + _frameCount; + auto perc_drop = 100. 
* static_cast(dropped) / total; + stat.addf( + "Frame Drop rate", "%u/%lu (%g%%)", + dropped, total, perc_drop); + } + // <---- Frame drop count +} + +void ZedCameraOne::updateInputModeDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + + if (_svoMode) { + stat.add("Input mode", "SVO"); + stat.addf("SVO file", "%s", _svoFilepath.c_str()); + return; + } + + if (_streamMode) { + stat.add("Input mode", "LOCAL STREAM"); + } else { + stat.add("Input mode", "Live Camera"); + } +} + +void ZedCameraOne::updateImageDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_videoPublishing) { + double freq = 1. / _imagePeriodMean_sec->getAvg(); + double freq_perc = 100. * freq / _camGrabFrameRate; + double frame_grab_period = 1. / _camGrabFrameRate; + stat.addf("Image", "Mean Frequency: %.1f Hz (%.1f%%)", freq, freq_perc); + stat.addf( + "Image", "Processing Time: %.6f sec (Max. %.3f sec)", + _imagePeriodMean_sec->getAvg(), frame_grab_period); + } else { + stat.add("Image", "Topic not subscribed"); + } +} + +void ZedCameraOne::updateSvoDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_svoMode) { + double svo_perc = + 100. * (static_cast(_svoFrameId) / _svoFrameCount); + + stat.addf( + "Playing SVO", "%sFrame: %d/%d (%.1f%%)", + (_svoPause ? "PAUSED - " : ""), _svoFrameId, _svoFrameCount, + svo_perc); + stat.addf("SVO Loop", "%s", (_svoLoop ? "ON" : "OFF")); + if (_svoLoop) { + stat.addf("SVO Loop Count", "%d", _svoLoopCount); + } + stat.addf("SVO Real Time mode", "%s", (_svoRealtime ? "ON" : "OFF")); + } +} + +void ZedCameraOne::updateTfImuDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_publishSensImuTF) { + double freq = 1. 
/ _pubImuTF_sec->getAvg(); + stat.addf("TF IMU", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("TF IMU", "DISABLED"); + } +} + +void ZedCameraOne::updateImuDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_imuPublishing) { + double freq = 1. / _pubImu_sec->getAvg(); + stat.addf("IMU", "Mean Frequency: %.1f Hz", freq); + } else { + stat.add("IMU Sensor", "Topics not subscribed"); + } +} + +void ZedCameraOne::updateTemperatureDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + stat.addf("Camera Temp.", "%.1f °C", _tempImu); + + if (_tempImu > 70.f) { + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "High Camera temperature"); + } +} + +void ZedCameraOne::updateSvoRecordingDiagnostics(diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_recording) { + if (!_recStatus.status) { + stat.add("SVO Recording", "ERROR"); + stat.summary( + diagnostic_msgs::msg::DiagnosticStatus::WARN, + "Error adding frames to SVO file while recording. " + "Check " + "free disk space"); + } else { + stat.add("SVO Recording", "ACTIVE"); + stat.addf( + "SVO compression time", "%g msec", + _recStatus.average_compression_time); + stat.addf( + "SVO compression ratio", "%.1f%%", + _recStatus.average_compression_ratio); + stat.addf("SVO Frame Encoded", "%d", _recStatus.number_frames_encoded); + stat.addf("SVO Frame Ingested", "%d", _recStatus.number_frames_ingested); + } + } else { + stat.add("SVO Recording", "NOT ACTIVE"); + } +} + +void ZedCameraOne::updateStreamingServerDiagnostics( + diagnostic_updater::DiagnosticStatusWrapper & stat) +{ + if (_streamingServerRunning) { + stat.add("Streaming Server", "ACTIVE"); + + sl::StreamingParameters params; + params = _zed->getStreamingParameters(); + + stat.addf("Streaming port", "%d", static_cast(params.port)); + stat.addf( + "Streaming codec", "%s", + (params.codec == sl::STREAMING_CODEC::H264 ? 
"H264" : "H265")); + stat.addf("Streaming bitrate", "%d mbps", static_cast(params.bitrate)); + stat.addf("Streaming chunk size", "%d B", static_cast(params.chunk_size)); + stat.addf("Streaming GOP size", "%d", static_cast(params.gop_size)); + } else { + stat.add("Streaming Server", "NOT ACTIVE"); + } +} + +rcl_interfaces::msg::SetParametersResult +ZedCameraOne::callback_dynamicParamChange( + std::vector parameters) +{ + DEBUG_STREAM_COMM("=== Parameter change callback ==="); + + rcl_interfaces::msg::SetParametersResult result; + result.successful = true; + + int count_ok = 0; + int count = 0; + + DEBUG_STREAM_COMM("Modifying " << parameters.size() << " parameters"); + + for (const auto & param : parameters) { + DEBUG_STREAM_COMM("Param #" << count << ": " << param.get_name()); + count++; + + std::string param_name = param.get_name(); + + if (param_name == "sensors.sensors_pub_rate") { + double value = param.as_double(); + if (value != _sensPubRate) { + _sensPubRate = value; + _pubImuTF_sec->setNewSize(static_cast(_sensPubRate)); + _pubImu_sec->setNewSize(static_cast(_sensPubRate)); + DEBUG_STREAM_COMM(" * " << param_name << " changed to " << value); + } else { + DEBUG_STREAM_COMM(" * " << param_name << " not changed: " << value); + } + continue; + } + + auto found = _camDynParMapChanged.find(param_name); + if (found != _camDynParMapChanged.end()) { + if (handleDynamicVideoParam(param, param_name, count_ok, result)) { + continue; + } else { + result.successful = false; + result.reason = "Parameter " + param_name + " not mapped"; + DEBUG_COMM(result.reason.c_str()); + break; + } + } else { + result.successful = false; + result.reason = "Parameter " + param_name + " not recognized"; + DEBUG_COMM(result.reason.c_str()); + break; + } + } + + DEBUG_STREAM_COMM("Updated parameters: " << count_ok << "/" << parameters.size()); + + _triggerUpdateDynParams = (count_ok > 0); + + if (count_ok > 0) { + if (_debugCommon) { + DEBUG_COMM("Settings to be applied: "); + for (auto & 
param: _camDynParMapChanged) { + if (param.second) {DEBUG_STREAM_COMM(" * " << param.first);} + } + } + } + + return result; +} + +// Helper function for handling dynamic video parameters +bool ZedCameraOne::handleDynamicVideoParam( + const rclcpp::Parameter & param, + const std::string & param_name, + int & count_ok, + rcl_interfaces::msg::SetParametersResult & result) +{ + if (handleSaturationSharpnessGamma(param, param_name, count_ok)) { + result.successful = true; + return true; + } + if (handleWhiteBalance(param, param_name, count_ok)) { + result.successful = true; + return true; + } + if (handleExposure(param, param_name, count_ok)) { + result.successful = true; + return true; + } + if (handleAnalogGain(param, param_name, count_ok)) { + result.successful = true; + return true; + } + if (handleDigitalGain(param, param_name, count_ok)) { + result.successful = true; + return true; + } + return false; +} + +// Helper for saturation, sharpness, gamma +bool ZedCameraOne::handleSaturationSharpnessGamma( + const rclcpp::Parameter & param, const std::string & param_name, int & count_ok) +{ + auto updateIntParam = [this](const std::string & paramName, const int value, int & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + + if (param_name == "video.saturation") { + if (updateIntParam(param_name, param.as_int(), _camSaturation)) {count_ok++;} + return true; + } else if (param_name == "video.sharpness") { + if (updateIntParam(param_name, param.as_int(), _camSharpness)) {count_ok++;} + return true; + } else if (param_name == "video.gamma") { + if (updateIntParam(param_name, param.as_int(), _camGamma)) {count_ok++;} + return true; + } + return false; +} + +// Helper for white 
balance +bool ZedCameraOne::handleWhiteBalance( + const rclcpp::Parameter & param, const std::string & param_name, int & count_ok) +{ + auto updateIntParam = [this](const std::string & paramName, const int value, int & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + auto updateBoolParam = [this](const std::string & paramName, const bool value, bool & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + + if (param_name == "video.auto_whitebalance") { + if (updateBoolParam(param_name, param.as_bool(), _camAutoWB)) {count_ok++;} + return true; + } else if (param_name == "video.whitebalance_temperature") { + if (updateIntParam(param_name, param.as_int(), _camWBTemp)) { + count_ok++; + updateBoolParam("video.auto_whitebalance", false, _camAutoWB); + } + return true; + } + return false; +} + +// Helper for exposure +bool ZedCameraOne::handleExposure( + const rclcpp::Parameter & param, const std::string & param_name, int & count_ok) +{ + auto updateIntParam = [this](const std::string & paramName, const int value, int & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + auto updateBoolParam = 
[this](const std::string & paramName, const bool value, bool & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + + if (param_name == "video.auto_exposure") { + if (updateBoolParam(param_name, param.as_bool(), _camAutoExposure)) {count_ok++;} + return true; + } else if (param_name == "video.exposure_compensation") { + if (updateIntParam(param_name, param.as_int(), _camExposureComp)) {count_ok++;} + return true; + } else if (param_name == "video.exposure_time") { + if (updateIntParam(param_name, param.as_int(), _camExpTime)) { + count_ok++; + updateBoolParam("video.auto_exposure", false, _camAutoExposure); + updateIntParam("video.auto_exposure_time_range_min", _camExpTime, _camAutoExpTimeRangeMin); + updateIntParam("video.auto_exposure_time_range_max", _camExpTime, _camAutoExpTimeRangeMax); + } + return true; + } else if (param_name == "video.auto_exposure_time_range_min") { + if (updateIntParam(param_name, param.as_int(), _camAutoExpTimeRangeMin)) { + count_ok++; + if (_camAutoExpTimeRangeMin != _camAutoExpTimeRangeMax) { + updateBoolParam("video.auto_exposure", true, _camAutoExposure); + } else { + updateBoolParam("video.auto_exposure", false, _camAutoExposure); + updateIntParam("video.exposure_time", _camAutoExpTimeRangeMin, _camExpTime); + } + } + return true; + } else if (param_name == "video.auto_exposure_time_range_max") { + if (updateIntParam(param_name, param.as_int(), _camAutoExpTimeRangeMax)) { + count_ok++; + if (_camAutoExpTimeRangeMin != _camAutoExpTimeRangeMax) { + updateBoolParam("video.auto_exposure", true, _camAutoExposure); + } else { + updateBoolParam("video.auto_exposure", false, _camAutoExposure); + updateIntParam("video.exposure_time", 
_camAutoExpTimeRangeMax, _camExpTime); + } + } + return true; + } + return false; +} + +// Helper for analog gain +bool ZedCameraOne::handleAnalogGain( + const rclcpp::Parameter & param, const std::string & param_name, int & count_ok) +{ + auto updateIntParam = [this](const std::string & paramName, const int value, int & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + auto updateBoolParam = [this](const std::string & paramName, const bool value, bool & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + + if (param_name == "video.auto_analog_gain") { + if (updateBoolParam(param_name, param.as_bool(), _camAutoAnalogGain)) {count_ok++;} + return true; + } else if (param_name == "video.analog_gain") { + if (updateIntParam(param_name, param.as_int(), _camAnalogGain)) { + count_ok++; + updateBoolParam("video.auto_analog_gain", false, _camAutoAnalogGain); + updateIntParam( + "video.auto_analog_gain_range_min", _camAnalogGain, + _camAutoAnalogGainRangeMin); + updateIntParam( + "video.auto_analog_gain_range_max", _camAnalogGain, + _camAutoAnalogGainRangeMax); + } + return true; + } else if (param_name == "video.auto_analog_gain_range_min") { + if (updateIntParam(param_name, param.as_int(), _camAutoAnalogGainRangeMin)) { + count_ok++; + if (_camAutoAnalogGainRangeMin != _camAutoAnalogGainRangeMax) { + updateBoolParam("video.auto_analog_gain", true, _camAutoAnalogGain); + } else { + 
updateBoolParam("video.auto_analog_gain", false, _camAutoAnalogGain); + updateIntParam("video.analog_gain", _camAutoAnalogGainRangeMin, _camAnalogGain); + } + } + return true; + } else if (param_name == "video.auto_analog_gain_range_max") { + if (updateIntParam(param_name, param.as_int(), _camAutoAnalogGainRangeMax)) { + count_ok++; + if (_camAutoAnalogGainRangeMin != _camAutoAnalogGainRangeMax) { + updateBoolParam("video.auto_analog_gain", true, _camAutoAnalogGain); + } else { + updateBoolParam("video.auto_analog_gain", false, _camAutoAnalogGain); + updateIntParam("video.analog_gain", _camAutoAnalogGainRangeMax, _camAnalogGain); + } + } + return true; + } + return false; +} + +// Helper for digital gain +bool ZedCameraOne::handleDigitalGain( + const rclcpp::Parameter & param, const std::string & param_name, int & count_ok) +{ + auto updateIntParam = [this](const std::string & paramName, const int value, int & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + auto updateBoolParam = [this](const std::string & paramName, const bool value, bool & paramVal) { + if (value != paramVal) { + paramVal = value; + _camDynParMapChanged[paramName] = true; + DEBUG_STREAM_COMM(" * " << paramName << " changed to " << value); + } else { + _camDynParMapChanged[paramName] = false; + DEBUG_STREAM_COMM(" * " << paramName << " not changed: " << value); + } + return _camDynParMapChanged[paramName]; + }; + + if (param_name == "video.auto_digital_gain") { + if (updateBoolParam(param_name, param.as_bool(), _camAutoDigitalGain)) {count_ok++;} + return true; + } else if (param_name == "video.digital_gain") { + if (updateIntParam(param_name, param.as_int(), _camDigitalGain)) { + count_ok++; + 
updateBoolParam("video.auto_digital_gain", false, _camAutoDigitalGain); + updateIntParam( + "video.auto_digital_gain_range_min", _camDigitalGain, + _camAutoDigitalGainRangeMin); + updateIntParam( + "video.auto_digital_gain_range_max", _camDigitalGain, + _camAutoDigitalGainRangeMax); + } + return true; + } else if (param_name == "video.auto_digital_gain_range_min") { + if (updateIntParam(param_name, param.as_int(), _camAutoDigitalGainRangeMin)) { + count_ok++; + if (_camAutoDigitalGainRangeMin != _camAutoDigitalGainRangeMax) { + updateBoolParam("video.auto_digital_gain", true, _camAutoDigitalGain); + } else { + updateBoolParam("video.auto_digital_gain", false, _camAutoDigitalGain); + updateIntParam("video.digital_gain", _camAutoDigitalGainRangeMin, _camDigitalGain); + } + } + return true; + } else if (param_name == "video.auto_digital_gain_range_max") { + if (updateIntParam(param_name, param.as_int(), _camAutoDigitalGainRangeMax)) { + count_ok++; + if (_camAutoDigitalGainRangeMin != _camAutoDigitalGainRangeMax) { + updateBoolParam("video.auto_digital_gain", true, _camAutoDigitalGain); + } else { + updateBoolParam("video.auto_digital_gain", false, _camAutoDigitalGain); + updateIntParam("video.digital_gain", _camAutoDigitalGainRangeMax, _camDigitalGain); + } + } + return true; + } + return false; +} + +void ZedCameraOne::initTFCoordFrameNames() +{ + // ----> Coordinate frames + _cameraLinkFrameId = _cameraName + "_camera_link"; + _cameraCenterFrameId = _cameraName + "_camera_center"; + _camImgFrameId = _cameraName + "_camera_frame"; + _camOptFrameId = _cameraName + "_camera_frame_optical"; + _imuFrameId = _cameraName + "_imu_link"; + + // Print TF frames + RCLCPP_INFO_STREAM(get_logger(), "=== TF FRAMES ==="); + RCLCPP_INFO_STREAM(get_logger(), " * Camera link\t-> " << _cameraLinkFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Camera center\t-> " << _cameraCenterFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * Image\t\t-> " << _camImgFrameId); + 
RCLCPP_INFO_STREAM(get_logger(), " * Optical\t-> " << _camOptFrameId); + RCLCPP_INFO_STREAM(get_logger(), " * IMU\t\t-> " << _imuFrameId); + // <---- Coordinate frames +} + +void ZedCameraOne::initPublishers() +{ + RCLCPP_INFO(get_logger(), "=== PUBLISHED TOPICS ==="); + + // Video + initVideoPublishers(); + + // Sensors + initSensorPublishers(); + + // ----> Camera/imu transform message + if (_publishSensImuTransf) { + std::string cam_imu_tr_topic = _topicRoot + "left_cam_imu_transform"; + auto qos = _qos; + if (!_usingIPC) { + // Use TRANSIENT_LOCAL durability if not using intra-process comms + qos = qos.durability(rclcpp::DurabilityPolicy::TransientLocal); + } + _pubCamImuTransf = create_publisher( + cam_imu_tr_topic, qos, _pubOpt); + + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << _pubCamImuTransf->get_topic_name()); + + sl::Orientation sl_rot = _slCamImuTransf.getOrientation(); + sl::Translation sl_tr = _slCamImuTransf.getTranslation(); + RCLCPP_INFO( + get_logger(), " * Camera-IMU Translation: \n %g %g %g", sl_tr.x, + sl_tr.y, sl_tr.z); + RCLCPP_INFO( + get_logger(), " * Camera-IMU Rotation:\n%s", + sl_rot.getRotationMatrix().getInfos().c_str()); + } + // <---- Camera/imu transform message + + // Status topic names + std::string status_root = _topicRoot + "status/"; + std::string svo_status_topic = status_root + "svo"; + std::string heartbeat_topic = status_root + "heartbeat"; + + RCLCPP_INFO(get_logger(), " +++ STATUS TOPICS +++"); + + // ----> SVO Status publisher + if (_svoMode) { + if (_publishStatus) { + _pubSvoStatus = create_publisher( + svo_status_topic, _qos, _pubOpt); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << _pubSvoStatus->get_topic_name()); + } + if (_useSvoTimestamp && _publishSvoClock) { + auto clock_qos = rclcpp::ClockQoS(); + clock_qos.reliability(rclcpp::ReliabilityPolicy::Reliable); // REQUIRED + _pubClock = create_publisher( + "/clock", clock_qos, _pubOpt); + RCLCPP_INFO_STREAM( + 
get_logger(), " * Advertised on topic: " + << _pubClock->get_topic_name()); + } + } + // <---- SVO Status publisher + + if (_publishStatus) { + // ----> Heartbeat Status publisher + _pubHeartbeatStatus = create_publisher( + heartbeat_topic, _qos, _pubOpt); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << _pubHeartbeatStatus->get_topic_name()); + // <---- Heartbeat Status publisher + } +} + +void ZedCameraOne::threadFunc_zedGrab() +{ + DEBUG_STREAM_COMM("Grab thread started"); + + // Set the name of the zedGrab thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_zedGrab")).c_str()); + + setupGrabThreadPolicy(); + + initializeGrabThreadStatus(); + + while (1) { + try { + RCLCPP_INFO_STREAM_ONCE(get_logger(), "=== " << _cameraName << " started ==="); + + sl_tools::StopWatch grabElabTimer(get_clock()); + + if (checkGrabThreadInterruption()) {break;} + + if (_svoMode && _svoPause) { + if (!_grabOnce) { + rclcpp::sleep_for(100ms); +#ifdef USE_SVO_REALTIME_PAUSE + // Dummy grab + mZed->grab(); +#endif + continue; + } else { + _grabOnce = false; // Reset the flag and grab once + } + } + + handleDynamicSettings(); + + updateGrabFrequency(); + + if (!_svoPause) { + grabElabTimer.tic(); + if (!performCameraGrab()) {break;} + updateFrameTimestamp(); + if (_svoMode) { + _svoFrameId = _zed->getSVOPosition(); + _svoFrameCount = _zed->getSVONumberOfFrames(); + + publishSvoClock(); + } + handleStreamingServer(); + } + + handleSvoRecordingStatus(); + + handleImageRetrievalAndPublishing(); + + _elabPeriodMean_sec->addValue(grabElabTimer.toc()); + } catch (rclcpp::exceptions::ParameterNotDeclaredException & ex) { + RCLCPP_ERROR_STREAM(get_logger(), "ParameterNotDeclaredException: " << ex.what()); + continue; + } catch (const std::exception & e) { + RCLCPP_ERROR_STREAM(get_logger(), "Exception: " << e.what()); + continue; + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("threadFunc_zedGrab: Generic exception."); + continue; + } + } + + _diagUpdater.broadcast(diagnostic_msgs::msg::DiagnosticStatus::STALE, "Grab thread stopped"); + _diagUpdater.force_update(); + + // Stop the heartbeat + _heartbeatTimer->cancel(); + + DEBUG_STREAM_COMM("Grab thread finished"); +} + +// Helper functions for threadFunc_zedGrab + +void ZedCameraOne::setupGrabThreadPolicy() +{ + if (_changeThreadSched) { + DEBUG_STREAM_ADV("Grab thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default GRAB thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + sched_param par; + par.sched_priority = + (_threadSchedPolicy == "SCHED_FIFO" || _threadSchedPolicy == "SCHED_RR") ? + _threadPrioGrab : + 0; + int policy = SCHED_OTHER; + if (_threadSchedPolicy == "SCHED_BATCH") { + policy = SCHED_BATCH; + } else if (_threadSchedPolicy == "SCHED_FIFO") { + policy = SCHED_FIFO; + } else if (_threadSchedPolicy == "SCHED_RR") { + policy = SCHED_RR; + } + if (pthread_setschedparam(pthread_self(), policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + + if (_debugAdvanced) { + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! 
- " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New GRAB thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + } +} + +void ZedCameraOne::initializeGrabThreadStatus() +{ + _frameCount = 0; + _threadStop = false; +} + +bool ZedCameraOne::checkGrabThreadInterruption() +{ + if (!rclcpp::ok()) { + DEBUG_STREAM_COMM("Ctrl+C received: stopping grab thread"); + _threadStop = true; + return true; + } + if (_threadStop) { + DEBUG_STREAM_COMM("Grab thread stopped"); + return true; + } + return false; +} + +void ZedCameraOne::handleDynamicSettings() +{ + if (!_svoMode && _triggerUpdateDynParams) { + applyDynamicSettings(); + } +} + +void ZedCameraOne::updateGrabFrequency() +{ + double elapsed_sec = _grabFreqTimer.toc(); + _grabPeriodMean_sec->addValue(elapsed_sec); + _grabFreqTimer.tic(); +} + +bool ZedCameraOne::performCameraGrab() +{ + _grabStatus = _zed->grab(); + if (_grabStatus != sl::ERROR_CODE::SUCCESS) { + if (_svoMode && _grabStatus == sl::ERROR_CODE::END_OF_SVOFILE_REACHED) { + // ----> Check SVO status + if (_svoLoop) { + _svoLoopCount++; + _zed->setSVOPosition(_svoFrameStart); + RCLCPP_WARN_STREAM( + get_logger(), + "SVO reached the end and it has been restarted from frame #" + << _svoFrameStart); + rclcpp::sleep_for( + std::chrono::microseconds( + static_cast(_grabPeriodMean_sec->getAvg() * 1e6))); + return true; + } else { + // ----> Stop all the other threads and Timers + _threadStop = true; + if (_tempPubTimer) { + _tempPubTimer->cancel(); + } + // <---- Stop all the other threads and Timers + + RCLCPP_WARN(get_logger(), "SVO reached the end."); + + // Force SVO status update + if (!publishSvoStatus(_frameTimestamp.nanoseconds())) { + RCLCPP_WARN(get_logger(), "Node stopped. 
Press Ctrl+C to exit."); + return false; + } else { + if (_pubSvoStatus) { + RCLCPP_WARN_STREAM( + get_logger(), + "Waiting for SVO status subscribers to " + "unsubscribe. Active subscribers: " + << _pubSvoStatus->get_subscription_count()); + } + _diagUpdater.force_update(); + rclcpp::sleep_for(1s); + return true; + } + } + //<---- Check SVO status + } else if (_grabStatus == sl::ERROR_CODE::CAMERA_REBOOTING) { + RCLCPP_ERROR_STREAM( + get_logger(), "Connection issue detected: " << sl::toString( + _grabStatus).c_str()); + rclcpp::sleep_for(1s); + return true; + } else if (_grabStatus == sl::ERROR_CODE::CAMERA_NOT_INITIALIZED || + _grabStatus == sl::ERROR_CODE::FAILURE) + { + RCLCPP_ERROR_STREAM( + get_logger(), "Camera issue detected: " + << sl::toString(_grabStatus).c_str() << ". " << sl::toVerbose(_grabStatus).c_str() + << ". Trying to recover the connection..."); + rclcpp::sleep_for(1s); + return true; + } else { + RCLCPP_ERROR_STREAM( + get_logger(), "Critical camera error: " + << sl::toString(_grabStatus).c_str() << ". " << sl::toVerbose(_grabStatus).c_str() + << ". 
NODE KILLED."); + _zed.reset(); + exit(EXIT_FAILURE); + } + } + _frameCount++; + return true; +} + +void ZedCameraOne::updateFrameTimestamp() +{ + _sdkGrabTS = _zed->getTimestamp(sl::TIME_REFERENCE::IMAGE); + + if (_svoMode) { + if (_useSvoTimestamp) { + _frameTimestamp = sl_tools::slTime2Ros(_sdkGrabTS); + } else { + _frameTimestamp = sl_tools::slTime2Ros(_zed->getTimestamp(sl::TIME_REFERENCE::CURRENT)); + } + } else { + _frameTimestamp = sl_tools::slTime2Ros(_sdkGrabTS); + } +} + +void ZedCameraOne::publishSvoClock() +{ + if (_svoMode) { + // ----> Publish Clock if required + if (_useSvoTimestamp && _publishSvoClock) { + publishClock(_zed->getTimestamp(sl::TIME_REFERENCE::IMAGE)); + } + // <---- Publish Clock if required + } +} + +void ZedCameraOne::publishClock(const sl::Timestamp & ts) +{ + DEBUG_COMM("Publishing clock"); + + if (!_pubClock) { + return; + } + + size_t subCount = 0; + try { + subCount = _pubClock->get_subscription_count(); + } catch (...) { + rcutils_reset_error(); + DEBUG_COMM("publishClock: Exception while counting subscribers"); + return; + } + + if (subCount == 0) { + return; + } + + auto msg = std::make_unique(); + msg->clock = sl_tools::slTime2Ros(ts); + + if (_pubClock) { + _pubClock->publish(std::move(msg)); + } +} + +bool ZedCameraOne::publishSvoStatus(uint64_t frame_ts) +{ + if (!_svoMode) { + return false; + } + + if (!_pubSvoStatus) { + return false; + } + + size_t subCount = 0; + try { + subCount = _pubSvoStatus->get_subscription_count(); + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("publishSvoStatus: Exception while counting subscribers"); + return false; + } + + if (subCount > 0) { + auto msg = std::make_unique(); + + // ----> Fill the status message + msg->file_name = _svoFilepath; + msg->frame_id = _zed->getSVOPosition(); + msg->total_frames = _zed->getSVONumberOfFrames(); + msg->frame_ts = frame_ts; + + if (_svoPause) { + msg->status = zed_msgs::msg::SvoStatus::STATUS_PAUSED; + } else if (_grabStatus == sl::ERROR_CODE::END_OF_SVOFILE_REACHED) { + msg->frame_id = msg->total_frames - 1; + msg->status = zed_msgs::msg::SvoStatus::STATUS_END; + } else { + msg->status = zed_msgs::msg::SvoStatus::STATUS_PLAYING; + } + + msg->loop_active = _svoLoop; + msg->loop_count = _svoLoopCount; + msg->real_time_mode = _svoRealtime; + // <---- Fill the status message + + // Publish the message + if (_pubSvoStatus) { + _pubSvoStatus->publish(std::move(msg)); + } + return true; + } + return false; +} + +void ZedCameraOne::callback_pubHeartbeat() +{ + if (!_pubHeartbeatStatus) { + return; + } + + if (!_publishStatus) { + return; + } + + if (_threadStop) { + return; + } + + // ----> Count the subscribers + size_t sub_count = 0; + try { + sub_count = _pubHeartbeatStatus->get_subscription_count(); + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("publishHeartbeat: Exception while counting subscribers"); + return; + } + + if (sub_count == 0) { + return; + } + // <---- Count the subscribers + + // ----> Fill the message + auto msg = std::make_unique(); + msg->beat_count = ++_heartbeatCount; + msg->camera_sn = _camSerialNumber; + msg->full_name = this->get_fully_qualified_name(); + msg->node_name = this->get_name(); + msg->node_ns = this->get_namespace(); + msg->simul_mode = false; //mSimMode; + msg->svo_mode = _svoMode; + // <---- Fill the message + + // Publish the heartbeat + if (_pubHeartbeatStatus) { + _pubHeartbeatStatus->publish(std::move(msg)); + } +} + +void ZedCameraOne::handleStreamingServer() +{ + if (_streamingServerRequired && !_streamingServerRunning) { + DEBUG_STR("Streaming server required, but not running"); + startStreamingServer(); + } +} + +void ZedCameraOne::handleSvoRecordingStatus() +{ + { + std::lock_guard lock(_recMutex); + if (_recording) { + _recStatus = _zed->getRecordingStatus(); + if (!_recStatus.status) { + rclcpp::Clock steady_clock(RCL_STEADY_TIME); + RCLCPP_WARN_THROTTLE(get_logger(), steady_clock, 1000.0, "Error saving frame to SVO"); + } + } + } +} + +bool ZedCameraOne::startStreamingServer() +{ + DEBUG_STR("Starting streaming server"); + + if (_zed->isStreamingEnabled()) { + _zed->disableStreaming(); + RCLCPP_WARN(get_logger(), "A streaming server was already running and has been stopped"); + } + + sl::StreamingParameters params; + params.adaptative_bitrate = _streamingServerAdaptiveBitrate; + params.bitrate = _streamingServerBitrate; + params.chunk_size = _streamingServerChunckSize; + params.codec = _streamingServerCodec; + params.gop_size = _streamingServerGopSize; + params.port = _streamingServerPort; + params.target_framerate = _streamingServerTargetFramerate; + + sl::ERROR_CODE res; + res = _zed->enableStreaming(params); + if (res != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), "Error starting the 
Streaming server: " << sl::toString( + res) << " - " << sl::toVerbose(res)); + _streamingServerRunning = false; + } else { + _streamingServerRunning = true; + RCLCPP_INFO(get_logger(), "Streaming server started"); + } + return _streamingServerRunning; +} + +void ZedCameraOne::stopStreamingServer() +{ + if (_zed->isStreamingEnabled()) { + _zed->disableStreaming(); + RCLCPP_INFO(get_logger(), "Streaming server stopped"); + } else { + RCLCPP_WARN(get_logger(), "A streaming server was not enabled."); + } + + _streamingServerRunning = false; + _streamingServerRequired = false; +} + + +void ZedCameraOne::callback_enableStreaming( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + if (req->data) { + if (_streamingServerRunning) { + RCLCPP_WARN(get_logger(), "A Streaming Server is already running"); + res->message = "A Streaming Server is already running"; + res->success = false; + return; + } + // Start + if (startStreamingServer()) { + res->message = "Streaming Server started"; + res->success = true; + return; + } else { + res->message = + "Error occurred starting the Streaming Server. 
Read the log for more " + "info"; + res->success = false; + return; + } + } else { + // Stop + if (!_streamingServerRunning) { + RCLCPP_WARN( + get_logger(), + "There is no Streaming Server active to be stopped"); + res->message = "There is no Streaming Server active to be stopped"; + res->success = false; + return; + } + + RCLCPP_INFO(get_logger(), "Stopping the Streaming Server"); + stopStreamingServer(); + + res->message = "Streaming Server stopped"; + res->success = true; + return; + } +} + +void ZedCameraOne::callback_startSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Start SVO Recording service called **"); + + if (_svoMode) { + RCLCPP_WARN( + get_logger(), + "Cannot start SVO recording while playing SVO as input"); + res->message = "Cannot start SVO recording while playing SVO as input"; + res->success = false; + return; + } + + std::lock_guard lock(_recMutex); + + if (_recording) { + RCLCPP_WARN(get_logger(), "SVO Recording is already enabled"); + res->message = "SVO Recording is already enabled"; + res->success = false; + return; + } + + _svoRecBitrate = req->bitrate; + if ((_svoRecBitrate != 0) && + ((_svoRecBitrate < 1000) || (_svoRecBitrate > 60000))) + { + RCLCPP_WARN( + get_logger(), + "'bitrate' value not valid. Please use a value " + "in range [1000,60000], or 0 for default"); + res->message = + "'bitrate' value not valid. Please use a value in range " + "[1000,60000], or 0 for default"; + res->success = false; + return; + } + + if (req->compression_mode < 0 || req->compression_mode > 5) { + RCLCPP_WARN( + get_logger(), + "'compression_mode' mode not valid. Please use a value in " + "range [0,5]"); + res->message = + "'compression_mode' mode not valid. 
Please use a value in range " + "[0,5]"; + res->success = false; + return; + } + switch (req->compression_mode) { + case 1: + _svoRecCompression = sl::SVO_COMPRESSION_MODE::H264; + break; + case 3: + _svoRecCompression = sl::SVO_COMPRESSION_MODE::H264_LOSSLESS; + break; + case 4: + _svoRecCompression = sl::SVO_COMPRESSION_MODE::H265_LOSSLESS; + break; + case 5: + _svoRecCompression = sl::SVO_COMPRESSION_MODE::LOSSLESS; + break; + default: + _svoRecCompression = sl::SVO_COMPRESSION_MODE::H265; + break; + } + _svoRecFramerate = req->target_framerate; + _svoRecTranscode = req->input_transcode; + _svoRecFilename = req->svo_filename; + + if (_svoRecFilename.empty()) { + _svoRecFilename = "zed_one.svo2"; + } + + std::string err; + + if (!startSvoRecording(err)) { + res->message = "Error starting SVO recording: " + err; + res->success = false; + return; + } + + RCLCPP_INFO(get_logger(), "SVO Recording started: "); + RCLCPP_INFO_STREAM(get_logger(), " * Bitrate: " << _svoRecBitrate); + RCLCPP_INFO_STREAM( + get_logger(), + " * Compression mode: " << sl::toString(_svoRecCompression).c_str()); + RCLCPP_INFO_STREAM(get_logger(), " * Framerate: " << _svoRecFramerate); + RCLCPP_INFO_STREAM( + get_logger(), + " * Input Transcode: " << (_svoRecTranscode ? "TRUE" : "FALSE")); + RCLCPP_INFO_STREAM( + get_logger(), " * Filename: " << (_svoRecFilename.empty() ? 
+ "zed_one.svo2" : + _svoRecFilename)); + + res->message = "SVO Recording started"; + res->success = true; +} + +void ZedCameraOne::callback_stopSvoRec( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + (void)req; + + RCLCPP_INFO(get_logger(), "** Stop SVO Recording service called **"); + + std::lock_guard lock(_recMutex); + + if (!_recording) { + RCLCPP_WARN(get_logger(), "SVO Recording is NOT enabled"); + res->message = "SVO Recording is NOT enabled"; + res->success = false; + return; + } + + stopSvoRecording(); + + RCLCPP_INFO(get_logger(), "SVO Recording stopped"); + res->message = "SVO Recording stopped"; + res->success = true; +} + +void ZedCameraOne::callback_pauseSvoInput( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Pause SVO Input service called **"); + + std::lock_guard lock(_recMutex); + + if (!_svoMode) { + RCLCPP_WARN(get_logger(), "The node is not using an SVO as input"); + res->message = "The node is not using an SVO as inpu"; + res->success = false; + return; + } + + if (_svoRealtime) { + RCLCPP_WARN( + get_logger(), + "SVO input can be paused only if SVO is not in RealTime mode"); + res->message = + "SVO input can be paused only if SVO is not in RealTime mode"; + res->success = false; + _svoPause = false; + return; + } + + if (!_svoPause) { + RCLCPP_WARN(get_logger(), "SVO is paused"); + res->message = "SVO is paused"; + _svoPause = true; + } else { + RCLCPP_WARN(get_logger(), "SVO is playing"); + res->message = "SVO is playing"; + _svoPause = false; + } + res->success = true; +} + +void ZedCameraOne::callback_setSvoFrame( + const std::shared_ptr request_header, + const std::shared_ptr req, + std::shared_ptr res) +{ + (void)request_header; + + RCLCPP_INFO(get_logger(), "** Set SVO Frame service called **"); + + constexpr double SVO_FRAME_SET_MIN_INTERVAL = + 0.5; // 
Minimum interval in seconds between frame changes to prevent + // excessive seeking + + // ----> Check service call frequency + if (_setSvoFrameCheckTimer.toc() < SVO_FRAME_SET_MIN_INTERVAL) { + RCLCPP_WARN(get_logger(), "SVO frame set too fast"); + res->message = "SVO frame set too fast"; + res->success = false; + return; + } + _setSvoFrameCheckTimer.tic(); + // <---- Check service call frequency + + std::lock_guard lock(_recMutex); + + if (!_svoMode) { + RCLCPP_WARN(get_logger(), "The node is not using an SVO as input"); + res->message = "The node is not using an SVO as input"; + res->success = false; + return; + } + + int frame = req->frame_id; + int svo_frames = _zed->getSVONumberOfFrames(); + if (frame >= svo_frames) { + std::stringstream ss; + ss << "Frame number is out of range. SVO has " << svo_frames << " frames"; + RCLCPP_WARN(get_logger(), ss.str().c_str()); + res->message = ss.str(); + res->success = false; + return; + } + + _zed->setSVOPosition(frame); + RCLCPP_INFO_STREAM(get_logger(), "SVO frame set to " << frame); + res->message = "SVO frame set to " + std::to_string(frame); + + // if svo is paused, ensure one grab can update topics + if (_svoPause) { + _grabOnce = true; + _grabImuOnce = true; + } + + res->success = true; +} + +bool ZedCameraOne::startSvoRecording(std::string & errMsg) +{ + sl::RecordingParameters params; + + params.bitrate = _svoRecBitrate; + params.compression_mode = _svoRecCompression; + params.target_framerate = _svoRecFramerate; + params.transcode_streaming_input = _svoRecTranscode; + params.video_filename = _svoRecFilename.c_str(); + + sl::ERROR_CODE err = _zed->enableRecording(params); + errMsg = sl::toString(err); + + if (err != sl::ERROR_CODE::SUCCESS) { + RCLCPP_ERROR_STREAM( + get_logger(), + "Error starting SVO recording: " << errMsg); + return false; + } + + _recording = true; + + return true; +} + +void ZedCameraOne::stopSvoRecording() +{ + if (_recording) { + _recording = false; + _zed->disableRecording(); + } +} + 
+void ZedCameraOne::startHeartbeatTimer() +{ + if (_heartbeatTimer != nullptr) { + _heartbeatTimer->cancel(); + } + + std::chrono::milliseconds pubPeriod_msec(HEARTBEAT_INTERVAL_MS); + _heartbeatTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCameraOne::callback_pubHeartbeat, this)); +} + +} // namespace stereolabs + +#include "rclcpp_components/register_node_macro.hpp" + +// Register the component with class_loader. +// This acts as a sort of entry point, allowing the component to be discoverable +// when its library is being loaded into a running process. +RCLCPP_COMPONENTS_REGISTER_NODE(stereolabs::ZedCameraOne) diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_sensors.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_sensors.cpp new file mode 100644 index 000000000..35f5cf909 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_sensors.cpp @@ -0,0 +1,631 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "zed_camera_one_component.hpp" +#include "sl_logging.hpp" + +using namespace std::chrono_literals; + +namespace stereolabs +{ + +void ZedCameraOne::getSensorsParams() +{ + rclcpp::Parameter paramVal; + + rcl_interfaces::msg::ParameterDescriptor read_only_descriptor; + read_only_descriptor.read_only = true; + + RCLCPP_INFO(get_logger(), "=== SENSORS parameters ==="); + + sl_tools::getParam( + shared_from_this(), "sensors.publish_imu_tf", + _publishSensImuTF, _publishSensImuTF, " * Publish IMU TF: "); + sl_tools::getParam( + shared_from_this(), "sensors.sensors_pub_rate", + _sensPubRate, _sensPubRate, + " * Sensors publishing rate [Hz]: ", true, 1.0, 400.0); +} + +void ZedCameraOne::initSensorPublishers() +{ + RCLCPP_INFO(get_logger(), " +++ SENSORS TOPICS +++"); + + // ----> Advertised topics + const std::string imu_topic_root = "imu/"; + const std::string imu_topic = imu_topic_root + "data"; + const std::string imu_raw_topic = imu_topic_root + "data_raw"; + const std::string temp_topic = "temperature"; + + // Helper to build topic names + auto make_topic = + [&](const std::string & type) { + std::string topic = _topicRoot + type; + return get_node_topics_interface()->resolve_topic_name(topic); + }; + + _sensImuTopic = make_topic(imu_topic); + _sensImuRawTopic = make_topic(imu_raw_topic); + _sensTempTopic = make_topic(temp_topic); + // <---- Advertised topics + + // ----> Create publishers + + // Sensors publishers + if (_publishSensImu) { + _pubImu = this->create_publisher(_sensImuTopic, _qos, _pubOpt); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << _pubImu->get_topic_name()); + } + + if (_publishSensImuRaw) { + _pubImuRaw = this->create_publisher(_sensImuRawTopic, _qos, _pubOpt); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << _pubImuRaw->get_topic_name()); + } + + if (_publishSensTemp) { + _pubTemp = this->create_publisher(_sensTempTopic, _qos, _pubOpt); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " 
<< _pubTemp->get_topic_name()); + } + // <---- Create publishers +} + +void ZedCameraOne::threadFunc_pubSensorsData() +{ + DEBUG_STREAM_SENS("Sensors thread started"); + + // Set the name of the pubSensorsData thread for easier identification in + // system monitors + pthread_setname_np(pthread_self(), (get_name() + std::string("_pubSensorsData")).c_str()); + setupSensorThreadScheduling(); + + DEBUG_STREAM_SENS("Sensors thread loop starting..."); + _lastTs_imu = TIMEZERO_ROS; + + constexpr auto SVO_PAUSE_POLL_INTERVAL = + 100ms; // Poll interval when SVO is paused + + while (true) { + if (handleSensorThreadInterruption()) {break;} + + if (_svoMode && _svoPause) { + if (!_grabImuOnce) { + rclcpp::sleep_for(SVO_PAUSE_POLL_INTERVAL); + continue; + } else { + _grabImuOnce = false; // Reset the flag and grab the IMU data + } + } + + if (!waitForCameraOpen()) {continue;} + if (!waitForSensorSubscribers()) {continue;} + if (!handleSensorPublishing()) {continue;} + adjustSensorPublishingFrequency(); + } + + DEBUG_STREAM_SENS("Sensors thread finished"); +} + +// Helper: Setup thread scheduling for sensors thread +void ZedCameraOne::setupSensorThreadScheduling() +{ + if (_changeThreadSched) { + DEBUG_STREAM_ADV("Sensors thread settings"); + if (_debugAdvanced) { + int policy; + sched_param par; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * Default Sensors thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + + sched_param par; + par.sched_priority = + (_threadSchedPolicy == "SCHED_FIFO" || _threadSchedPolicy == "SCHED_RR") ? 
+ _threadPrioSens : + 0; + int sched_policy = SCHED_OTHER; + if (_threadSchedPolicy == "SCHED_BATCH") { + sched_policy = SCHED_BATCH; + } else if (_threadSchedPolicy == "SCHED_FIFO") { + sched_policy = SCHED_FIFO; + } else if (_threadSchedPolicy == "SCHED_RR") { + sched_policy = SCHED_RR; + } + + if (pthread_setschedparam(pthread_self(), sched_policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to set thread params! - " + << std::strerror(errno)); + } + + if (_debugAdvanced) { + int policy; + if (pthread_getschedparam(pthread_self(), &policy, &par)) { + RCLCPP_WARN_STREAM( + get_logger(), " ! Failed to get thread policy! - " + << std::strerror(errno)); + } else { + DEBUG_STREAM_ADV( + " * New Sensors thread (#" + << pthread_self() << ") settings - Policy: " + << sl_tools::threadSched2Str(policy).c_str() + << " - Priority: " << par.sched_priority); + } + } + } +} + +// Helper: Handle thread interruption and shutdown +bool ZedCameraOne::handleSensorThreadInterruption() +{ + try { + if (!rclcpp::ok()) { + DEBUG_STREAM_SENS("Ctrl+C received: stopping sensors thread"); + _threadStop = true; + return true; + } + if (_threadStop) { + DEBUG_STREAM_SENS("[threadFunc_pubSensorsData] (2): Sensors thread stopped"); + return true; + } + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_COMM("[threadFunc_pubSensorsData] Generic exception."); + return false; + } + return false; +} + +// Helper: Wait for camera to be open +bool ZedCameraOne::waitForCameraOpen() +{ + if (!_zed->isOpened()) { + DEBUG_STREAM_SENS("[threadFunc_pubSensorsData] the camera is not open"); + rclcpp::sleep_for(200ms); + return false; + } + return true; +} + +// Helper: Wait for sensor topic subscribers +bool ZedCameraOne::waitForSensorSubscribers() +{ + _imuPublishing = areSensorsTopicsSubscribed(); + if (!_imuPublishing && !_publishSensImuTF) { + rclcpp::sleep_for(200ms); + return false; + } + return true; +} + +// Helper: Handle sensor publishing and sleep if needed +bool ZedCameraOne::handleSensorPublishing() +{ + if (!publishSensorsData()) { + auto sleep_msec = static_cast(_sensRateComp * (1000. / _sensPubRate)); + sleep_msec = std::max(1, sleep_msec); + DEBUG_STREAM_SENS("[threadFunc_pubSensorsData] Thread sleep: " << sleep_msec << " msec"); + rclcpp::sleep_for(std::chrono::milliseconds(sleep_msec)); + return false; + } + return true; +} + +// Helper: Adjust publishing frequency compensation +void ZedCameraOne::adjustSensorPublishingFrequency() +{ + double avg_freq = 1. 
/ _imuPeriodMean_sec->getAvg(); + double err = std::fabs(_sensPubRate - avg_freq); + const double COMP_P_GAIN = 0.0005; + + if (avg_freq < _sensPubRate) { + _sensRateComp -= COMP_P_GAIN * err; + } else if (avg_freq > _sensPubRate) { + _sensRateComp += COMP_P_GAIN * err; + } + + _sensRateComp = std::max(0.05, _sensRateComp); + _sensRateComp = std::min(2.0, _sensRateComp); + DEBUG_STREAM_SENS("[threadFunc_pubSensorsData] _sensRateComp: " << _sensRateComp); +} + +void ZedCameraOne::startTempPubTimer() +{ + if (_tempPubTimer != nullptr) { + _tempPubTimer->cancel(); + } + + std::chrono::milliseconds pubPeriod_msec(TEMP_PUB_INTERVAL_MS); + _tempPubTimer = create_wall_timer( + std::chrono::duration_cast(pubPeriod_msec), + std::bind(&ZedCameraOne::callback_pubTemp, this)); +} + +void ZedCameraOne::callback_pubTemp() +{ + DEBUG_STREAM_ONCE_SENS("Temperatures callback called"); + + if (_grabStatus != sl::ERROR_CODE::SUCCESS) { + DEBUG_SENS("Camera not ready"); + return; + } + + // ----> Always update temperature values for diagnostic + sl::SensorsData sens_data; + sl::ERROR_CODE err = _zed->getSensorsData(sens_data, sl::TIME_REFERENCE::CURRENT); + if (err != sl::ERROR_CODE::SUCCESS) { + // Only warn if not in SVO mode or if the error is not a benign sensor + // unavailability + if (!_svoMode || err != sl::ERROR_CODE::SENSORS_NOT_AVAILABLE) { + RCLCPP_WARN_STREAM( + get_logger(), + "[callback_pubTemp] sl::getSensorsData error: " + << sl::toString(err).c_str()); + } + return; + } + + sens_data.temperature.get( + sl::SensorsData::TemperatureData::SENSOR_LOCATION::IMU, _tempImu); + DEBUG_STREAM_SENS("Camera temperature: " << _tempImu << "°C"); + // <---- Always update temperature values for diagnostic + + // ----> Subscribers count + size_t tempSubCount = 0; + + try { + if (_pubTemp) { + tempSubCount = count_subscribers(_pubTemp->get_topic_name()); + DEBUG_STREAM_SENS("Temperature subscribers: " << static_cast(tempSubCount)); + } + } catch (...) 
{ + rcutils_reset_error(); + DEBUG_STREAM_SENS( + "callback_pubTemp: Exception while counting subscribers"); + return; + } + // <---- Subscribers count + + // ----> Publish temperature + if (tempSubCount > 0) { + auto imuTempMsg = std::make_unique(); + + imuTempMsg->header.stamp = get_clock()->now(); + + imuTempMsg->header.frame_id = _imuFrameId; + imuTempMsg->temperature = static_cast(_tempImu); + imuTempMsg->variance = 0.0; + + DEBUG_SENS("Publishing IMU TEMP message"); + try { + if (_pubTemp) { + _pubTemp->publish(std::move(imuTempMsg)); + } + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } + } + // <---- Publish temperature +} + +bool ZedCameraOne::publishSensorsData() +{ + if (_grabStatus != sl::ERROR_CODE::SUCCESS) { + DEBUG_SENS("Camera not ready"); + return false; + } + + sl::SensorsData sens_data; + sl::ERROR_CODE err = _zed->getSensorsData(sens_data, sl::TIME_REFERENCE::CURRENT); + if (err != sl::ERROR_CODE::SUCCESS) { + // Only warn if not in SVO mode or if the error is not a benign sensor unavailability + if (!_svoMode || err != sl::ERROR_CODE::SENSORS_NOT_AVAILABLE) { + RCLCPP_WARN_STREAM( + get_logger(), + "[publishSensorsData] sl::getSensorsData error: " << sl::toString(err).c_str()); + } + return false; + } + + rclcpp::Time ts_imu = sl_tools::slTime2Ros(sens_data.imu.timestamp); + double dT = ts_imu.seconds() - _lastTs_imu.seconds(); + _lastTs_imu = ts_imu; + bool new_imu_data = (dT > 0.0); + + if (!new_imu_data) { + DEBUG_STREAM_SENS("[publishSensorsData] No new sensors data"); + return false; + } + + updateImuFreqDiagnostics(dT); + + publishImuFrameAndTopic(); + + if (_imuSubCount > 0) { + publishImuMsg(ts_imu, sens_data); + } + + if (_imuRawSubCount > 0) { + publishImuRawMsg(ts_imu, sens_data); + } + + return true; +} + +void ZedCameraOne::updateImuFreqDiagnostics(double dT) +{ + DEBUG_STREAM_SENS("SENSOR LAST 
PERIOD: " << dT << " sec @" << 1. / dT << " Hz"); + auto elapsed = _imuFreqTimer.toc(); + _imuFreqTimer.tic(); + double mean = _imuPeriodMean_sec->addValue(elapsed); + _pubImu_sec->addValue(mean); + DEBUG_STREAM_SENS("IMU MEAN freq: " << 1. / mean); +} + +void ZedCameraOne::publishImuMsg(const rclcpp::Time & ts_imu, const sl::SensorsData & sens_data) +{ + if (!_pubImu) { + DEBUG_STREAM_SENS("[publishImuMsg] _pubImu is null"); + return; + } + + DEBUG_STREAM_SENS( + "[publishImuMsg] IMU subscribers: " << static_cast(_imuSubCount)); + auto imuMsg = std::make_unique(); + imuMsg->header.stamp = ts_imu; + imuMsg->header.frame_id = _imuFrameId; + + imuMsg->orientation.x = sens_data.imu.pose.getOrientation()[0]; + imuMsg->orientation.y = sens_data.imu.pose.getOrientation()[1]; + imuMsg->orientation.z = sens_data.imu.pose.getOrientation()[2]; + imuMsg->orientation.w = sens_data.imu.pose.getOrientation()[3]; + + imuMsg->angular_velocity.x = sens_data.imu.angular_velocity[0] * DEG2RAD; + imuMsg->angular_velocity.y = sens_data.imu.angular_velocity[1] * DEG2RAD; + imuMsg->angular_velocity.z = sens_data.imu.angular_velocity[2] * DEG2RAD; + + imuMsg->linear_acceleration.x = sens_data.imu.linear_acceleration[0]; + imuMsg->linear_acceleration.y = sens_data.imu.linear_acceleration[1]; + imuMsg->linear_acceleration.z = sens_data.imu.linear_acceleration[2]; + + for (int i = 0; i < 3; ++i) { + int r = i; + imuMsg->orientation_covariance[i * 3 + 0] = sens_data.imu.pose_covariance.r[r * 3 + 0] * + DEG2RAD * DEG2RAD; + imuMsg->orientation_covariance[i * 3 + 1] = sens_data.imu.pose_covariance.r[r * 3 + 1] * + DEG2RAD * DEG2RAD; + imuMsg->orientation_covariance[i * 3 + 2] = sens_data.imu.pose_covariance.r[r * 3 + 2] * + DEG2RAD * DEG2RAD; + + imuMsg->linear_acceleration_covariance[i * 3 + + 0] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 0]; + imuMsg->linear_acceleration_covariance[i * 3 + + 1] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 1]; + 
imuMsg->linear_acceleration_covariance[i * 3 + + 2] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 2]; + + imuMsg->angular_velocity_covariance[i * 3 + + 0] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 0] * DEG2RAD * DEG2RAD; + imuMsg->angular_velocity_covariance[i * 3 + + 1] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 1] * DEG2RAD * DEG2RAD; + imuMsg->angular_velocity_covariance[i * 3 + + 2] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 2] * DEG2RAD * DEG2RAD; + } + + try { + _pubImu->publish(std::move(imuMsg)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } +} + +void ZedCameraOne::publishImuRawMsg(const rclcpp::Time & ts_imu, const sl::SensorsData & sens_data) +{ + if (!_pubImuRaw) { + DEBUG_STREAM_SENS("[publishImuRawMsg] _pubImuRaw is null"); + return; + } + + DEBUG_STREAM_SENS( + "[publishImuRawMsg] IMU RAW subscribers: " + << static_cast(_imuRawSubCount)); + auto imuRawMsg = std::make_unique(); + imuRawMsg->header.stamp = ts_imu; + imuRawMsg->header.frame_id = _imuFrameId; + + imuRawMsg->angular_velocity.x = sens_data.imu.angular_velocity_uncalibrated[0] * DEG2RAD; + imuRawMsg->angular_velocity.y = sens_data.imu.angular_velocity_uncalibrated[1] * DEG2RAD; + imuRawMsg->angular_velocity.z = sens_data.imu.angular_velocity_uncalibrated[2] * DEG2RAD; + + imuRawMsg->linear_acceleration.x = sens_data.imu.linear_acceleration_uncalibrated[0]; + imuRawMsg->linear_acceleration.y = sens_data.imu.linear_acceleration_uncalibrated[1]; + imuRawMsg->linear_acceleration.z = sens_data.imu.linear_acceleration_uncalibrated[2]; + + for (int i = 0; i < 3; ++i) { + int r = i; + imuRawMsg->linear_acceleration_covariance[i * 3 + + 0] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 0]; + imuRawMsg->linear_acceleration_covariance[i * 3 + + 1] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 1]; 
+ imuRawMsg->linear_acceleration_covariance[i * 3 + + 2] = sens_data.imu.linear_acceleration_covariance.r[r * 3 + 2]; + + imuRawMsg->angular_velocity_covariance[i * 3 + + 0] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 0] * DEG2RAD * DEG2RAD; + imuRawMsg->angular_velocity_covariance[i * 3 + + 1] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 1] * DEG2RAD * DEG2RAD; + imuRawMsg->angular_velocity_covariance[i * 3 + + 2] = sens_data.imu.angular_velocity_covariance.r[r * 3 + 2] * DEG2RAD * DEG2RAD; + } + + try { + _pubImuRaw->publish(std::move(imuRawMsg)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } +} + +void ZedCameraOne::publishImuFrameAndTopic() +{ + if (!_publishSensImuTF && !_publishSensImuTransf) { + return; + } + + if (!_usingIPC && _staticImuTfPublished) { + DEBUG_ONCE_TF( + "Static Imu TF and Transient Local message already published"); + return; + } + + sl::Orientation sl_rot = _slCamImuTransf.getOrientation(); + sl::Translation sl_tr = _slCamImuTransf.getTranslation(); + + auto cameraImuTransfMsg = std::make_unique(); + + cameraImuTransfMsg->header.stamp = get_clock()->now(); + cameraImuTransfMsg->header.frame_id = _camImgFrameId; + cameraImuTransfMsg->child_frame_id = _imuFrameId; + + cameraImuTransfMsg->transform.rotation.x = sl_rot.ox; + cameraImuTransfMsg->transform.rotation.y = sl_rot.oy; + cameraImuTransfMsg->transform.rotation.z = sl_rot.oz; + cameraImuTransfMsg->transform.rotation.w = sl_rot.ow; + + cameraImuTransfMsg->transform.translation.x = sl_tr.x; + cameraImuTransfMsg->transform.translation.y = sl_tr.y; + cameraImuTransfMsg->transform.translation.z = sl_tr.z; + + // ----> Publish CAM/IMU Transform + if (_publishSensImuTransf) { + try { + size_t sub_count = 0; + if (_pubCamImuTransf) { + sub_count = count_subscribers(_pubCamImuTransf->get_topic_name()); + DEBUG_STREAM_SENS("Camera-IMU 
Transform subscribers: " << static_cast(sub_count)); + } + + if (sub_count && _pubCamImuTransf) { + _pubCamImuTransf->publish(std::move(cameraImuTransfMsg)); + } + } catch (const std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM("Message publishing generic exception."); + } + } + // <---- Publish CAM/IMU Transform + + // ----> Broadcast CAM/IMU TF + if (!_publishSensImuTF) { + return; + } + + auto transformStamped = std::make_unique(); + + transformStamped->header.stamp = get_clock()->now(); + transformStamped->header.frame_id = _camImgFrameId; + transformStamped->child_frame_id = _imuFrameId; + + transformStamped->transform.rotation.x = sl_rot.ox; + transformStamped->transform.rotation.y = sl_rot.oy; + transformStamped->transform.rotation.z = sl_rot.oz; + transformStamped->transform.rotation.w = sl_rot.ow; + + transformStamped->transform.translation.x = sl_tr.x; + transformStamped->transform.translation.y = sl_tr.y; + transformStamped->transform.translation.z = sl_tr.z; + + if (_usingIPC) { + _tfBroadcaster->sendTransform(*transformStamped); + DEBUG_STREAM_TF( + "Broadcasted new dynamic transform: " + << transformStamped->header.frame_id << " -> " << transformStamped->child_frame_id); + } else { + _staticTfBroadcaster->sendTransform(*transformStamped); + DEBUG_STREAM_TF( + "Broadcasted new static transform: " + << transformStamped->header.frame_id << " -> " << transformStamped->child_frame_id); + } + + double elapsed_sec = _imuTfFreqTimer.toc(); + _pubImuTF_sec->addValue(elapsed_sec); + _imuTfFreqTimer.tic(); + // <---- Broadcast CAM/IMU TF + + // Debug info + if (_debugTf) { + double roll, pitch, yaw; + tf2::Matrix3x3( + tf2::Quaternion( + transformStamped->transform.rotation.x, + transformStamped->transform.rotation.y, + transformStamped->transform.rotation.z, + transformStamped->transform.rotation.w)) + .getRPY(roll, pitch, yaw); + DEBUG_STREAM_TF( + "TF [" << 
transformStamped->header.frame_id << " -> " + << transformStamped->child_frame_id << "] Position: (" + << transformStamped->transform.translation.x << ", " + << transformStamped->transform.translation.y << ", " + << transformStamped->transform.translation.z + << ") - Orientation RPY: (" << roll * RAD2DEG << ", " + << pitch * RAD2DEG << ", " << yaw * RAD2DEG << ")"); + } + + _staticImuTfPublished = true; +} + +bool ZedCameraOne::areSensorsTopicsSubscribed() +{ + try { + if (_pubImu) { + _imuSubCount = _pubImu->get_subscription_count(); + } else { + _imuSubCount = 0; + } + if (_pubImuRaw) { + _imuRawSubCount = _pubImuRaw->get_subscription_count(); + } else { + _imuRawSubCount = 0; + } + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_SENS( + "areSensorsTopicsSubscribed: Exception while counting subscribers"); + return false; + } + + DEBUG_STREAM_SENS( + "[areSensorsTopicsSubscribed] IMU subscribers: " << _imuSubCount); + DEBUG_STREAM_SENS( + "[areSensorsTopicsSubscribed] IMU RAW subscribers: " << _imuRawSubCount); + + return (_imuSubCount + _imuRawSubCount) > 0; +} + +} // namespace stereolabs diff --git a/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_video.cpp b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_video.cpp new file mode 100644 index 000000000..5a90ff5a0 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_components/src/zed_camera_one/src/zed_camera_one_component_video.cpp @@ -0,0 +1,1121 @@ +// Copyright 2025 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "zed_camera_one_component.hpp" +#include "sl_logging.hpp" + +#include + +#include +#include + +namespace stereolabs +{ + +void ZedCameraOne::getVideoParams() +{ + rclcpp::Parameter paramVal; + + RCLCPP_INFO(get_logger(), "=== CAMERA CONTROL parameters ==="); + + sl_tools::getParam( + shared_from_this(), "video.enable_hdr", _enableHDR, + _enableHDR, " * Enable HDR: "); + + sl_tools::getParam( + shared_from_this(), "video.saturation", _camSaturation, + _camSaturation, " * Saturation: ", true, 0, 8); + _camDynParMapChanged["video.saturation"] = true; + sl_tools::getParam( + shared_from_this(), "video.sharpness", _camSharpness, + _camSharpness, " * Sharpness: ", true, 0, 8); + _camDynParMapChanged["video.sharpness"] = true; + sl_tools::getParam( + shared_from_this(), "video.gamma", _camGamma, _camGamma, + " * Gamma: ", true, 1, 9); + _camDynParMapChanged["video.gamma"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_whitebalance", _camAutoWB, + _camAutoWB, " * Auto White Balance: ", true); + _camDynParMapChanged["video.auto_whitebalance"] = true; + sl_tools::getParam( + shared_from_this(), "video.whitebalance_temperature", + _camWBTemp, _camWBTemp, + " * White Balance Temp (x100): ", true, 28, 65); + _camDynParMapChanged["video.whitebalance_temperature"] = true; + + sl_tools::getParam( + shared_from_this(), "video.auto_exposure", + _camAutoExposure, _camAutoExposure, + " * Auto Exposure: ", true); + _camDynParMapChanged["video.auto_exposure"] = true; + sl_tools::getParam( + shared_from_this(), "video.exposure_time", 
_camExpTime, + _camExpTime, " * Exposure (us): ", true, 28, 30000); + _camDynParMapChanged["video.exposure_time"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_exposure_time_range_min", + _camAutoExpTimeRangeMin, _camAutoExpTimeRangeMin, + " * Auto Exp Time Min (us): ", true, 28, 30000); + _camDynParMapChanged["video.auto_exposure_time_range_min"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_exposure_time_range_max", + _camAutoExpTimeRangeMax, _camAutoExpTimeRangeMax, + " * Auto Exp Time Max (us): ", true, 28, 30000); + _camDynParMapChanged["video.auto_exposure_time_range_max"] = true; + sl_tools::getParam( + shared_from_this(), "video.exposure_compensation", + _camExposureComp, _camExposureComp, + " * Exposure Compensation: ", true, 0, 100); + _camDynParMapChanged["video.exposure_compensation"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_analog_gain", + _camAutoAnalogGain, _camAutoAnalogGain, + " * Auto Analog Gain: ", true); + _camDynParMapChanged["video.auto_analog_gain"] = true; + sl_tools::getParam( + shared_from_this(), "video.analog_gain", _camAnalogGain, + _camAnalogGain, " * Analog Gain: ", true, 1000, 16000); + _camDynParMapChanged["video.analog_gain"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_analog_gain_range_min", + _camAutoAnalogGainRangeMin, _camAutoAnalogGainRangeMin, + " * Analog Gain Min: ", true, 1000, 16000); + _camDynParMapChanged["video.auto_analog_gain_range_min"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_analog_gain_range_max", + _camAutoAnalogGainRangeMax, _camAutoAnalogGainRangeMax, + " * Analog Gain Max: ", true, 1000, 16000); + _camDynParMapChanged["video.auto_analog_gain_range_max"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_digital_gain", + _camAutoDigitalGain, _camAutoDigitalGain, + " * Auto Digital Gain: ", true); + _camDynParMapChanged["video.auto_digital_gain"] = true; + sl_tools::getParam( + shared_from_this(), 
"video.digital_gain", _camDigitalGain, + _camDigitalGain, " * Digital Gain: ", true, 1, 256); + _camDynParMapChanged["video.digital_gain"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_digital_gain_range_min", + _camAutoDigitalGainRangeMin, _camAutoDigitalGainRangeMin, + " * Digital Gain Min: ", true, 1, 256); + _camDynParMapChanged["video.auto_digital_gain_range_min"] = true; + sl_tools::getParam( + shared_from_this(), "video.auto_digital_gain_range_max", + _camAutoDigitalGainRangeMax, _camAutoDigitalGainRangeMax, + " * Digital Gain Max: ", true, 1, 256); + _camDynParMapChanged["video.auto_digital_gain_range_max"] = true; + sl_tools::getParam( + shared_from_this(), "video.denoising", _camDenoising, + _camDenoising, " * Denoising: ", true, 0, 100); + _camDynParMapChanged["video.denoising"] = true; + + _triggerUpdateDynParams = true; +} + +void ZedCameraOne::initVideoPublishers() +{ + RCLCPP_INFO(get_logger(), " +++ IMAGE TOPICS +++"); + + // ----> Advertised topics + const std::string sensor = "rgb/"; + const std::string rect_prefix = "rect/"; + const std::string raw_prefix = "raw/"; + const std::string color_prefix = "color/"; + const std::string gray_prefix = "gray/"; + const std::string image_topic = "image"; + + // Helper to build topic names + auto make_topic = + [&](const std::string & sensor, const std::string & color_mode, const std::string & rect_raw, + const std::string & type) { + std::string topic = _topicRoot + sensor + color_mode + rect_raw + type; + return get_node_topics_interface()->resolve_topic_name(topic); + }; + + _imgColorTopic = make_topic(sensor, color_prefix, rect_prefix, image_topic); + _imgColorRawTopic = make_topic(sensor, color_prefix, raw_prefix, image_topic); + _imgGrayTopic = make_topic(sensor, gray_prefix, rect_prefix, image_topic); + _imgRawGrayTopic = make_topic(sensor, gray_prefix, raw_prefix, image_topic); + // <---- Advertised topics + + // ----> Create publishers + auto qos = _qos.get_rmw_qos_profile(); + + 
// Publishers logging + auto log_cam_pub = [&](const auto & pub) { + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << pub.getTopic()); + auto transports = image_transport::getLoadableTransports(); + for (const auto & transport : transports) { + std::string transport_copy = transport; + auto pos = transport_copy.find('/'); + if (pos != std::string::npos) { + transport_copy.erase(0, pos); + } + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " + << pub.getTopic() << transport_copy + << " [image_transport]"); + } + }; + + // Camera publishers + if (_nitrosDisabled) { + if (_publishImgRgb) { + _pubColorImg = image_transport::create_publisher(this, _imgColorTopic, qos); + log_cam_pub(_pubColorImg); + + if (_publishImgRaw) { + _pubColorRawImg = image_transport::create_publisher(this, _imgColorRawTopic, qos); + log_cam_pub(_pubColorRawImg); + } + } + + if (_publishImgGray) { + _pubGrayImg = image_transport::create_publisher(this, _imgGrayTopic, qos); + log_cam_pub(_pubGrayImg); + + if (_publishImgRaw) { + _pubGrayRawImg = image_transport::create_publisher(this, _imgRawGrayTopic, qos); + log_cam_pub(_pubGrayRawImg); + } + } + + + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + // Nitros publishers lambda + auto make_nitros_img_pub = [&](const std::string & topic) { + auto ret = std::make_shared>( + this, topic, nvidia::isaac_ros::nitros::nitros_image_bgra8_t::supported_type_name, + nvidia::isaac_ros::nitros::NitrosDiagnosticsConfig(), _qos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << topic); + RCLCPP_INFO_STREAM( + get_logger(), " * Advertised on topic: " << topic + "/nitros [isaac_ros_nitros]"); + return ret; + }; + if (_publishImgRgb) { + _nitrosPubColorImg = make_nitros_img_pub(_imgColorTopic); + + if (_publishImgRaw) { + _nitrosPubColorRawImg = make_nitros_img_pub(_imgColorRawTopic); + } + } + + if (_publishImgGray) { + _nitrosPubGrayImg = make_nitros_img_pub(_imgGrayTopic); + if (_publishImgRaw) { + _nitrosPubGrayRawImg 
= make_nitros_img_pub(_imgRawGrayTopic); + } + } +#endif + } + // <---- Create publishers + + // ----> Camera Info publishers + // Lambda to create and log CameraInfo publishers + auto make_cam_info_pub = [&](const std::string & topic) { + std::string info_topic = image_transport::getCameraInfoTopic(topic); + auto pub = create_publisher(info_topic, _qos); + RCLCPP_INFO_STREAM(get_logger(), " * Advertised on topic: " << pub->get_topic_name()); + return pub; + }; + + // Lambda to create and log CameraInfo publishers for image_transport or nitros + auto make_cam_info_trans_pub = [&](const std::string & topic) { + std::string info_topic = topic + "/camera_info"; + auto pub = create_publisher(info_topic, _qos); + RCLCPP_INFO_STREAM( + get_logger(), + " * Advertised on topic: " << pub->get_topic_name()); + return pub; + }; + + if (_publishImgRgb) { + _pubColorImgInfo = make_cam_info_pub(_imgColorTopic); + _pubColorImgInfoTrans = make_cam_info_trans_pub(_imgColorTopic); + + if (_publishImgRaw) { + _pubColorRawImgInfo = make_cam_info_pub(_imgColorRawTopic); + _pubColorRawImgInfoTrans = make_cam_info_trans_pub(_imgColorRawTopic); + } + } + + if (_publishImgGray) { + _pubGrayImgInfo = make_cam_info_pub(_imgGrayTopic); + _pubGrayImgInfoTrans = make_cam_info_trans_pub(_imgGrayTopic); + + if (_publishImgRaw) { + _pubGrayRawImgInfo = make_cam_info_pub(_imgRawGrayTopic); + _pubGrayRawImgInfoTrans = make_cam_info_trans_pub(_imgRawGrayTopic); + } + } + // <---- Camera Info publishers +} + +void ZedCameraOne::fillCamInfo( + sensor_msgs::msg::CameraInfo::SharedPtr camInfoMsg, + const std::string & frameId, bool rawParam) +{ + sl::CameraParameters zedParam; + + if (rawParam) { + zedParam = + _zed->getCameraInformation(_matResol).camera_configuration.calibration_parameters_raw; + } else { + zedParam = _zed->getCameraInformation(_matResol).camera_configuration.calibration_parameters; + } + + // https://docs.ros2.org/latest/api/sensor_msgs/msg/CameraInfo.html + + // ----> Distortion 
models + // ZED SDK params order: [ k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4] + // Radial (k1, k2, k3, k4, k5, k6), Tangential (p1,p2) and Prism (s1, s2, s3, + // s4) distortion. Prism not currently used. + + // ROS2 order (OpenCV) -> k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4 + // All ZED X One models use RATIONAL_POLYNOMIAL + camInfoMsg->distortion_model = + sensor_msgs::distortion_models::RATIONAL_POLYNOMIAL; + + camInfoMsg->d.resize(8); + for (size_t i = 0; i < 8; i++) { + camInfoMsg->d[i] = zedParam.disto[i]; + } + + // Intrinsic + camInfoMsg->k.fill(0.0); + camInfoMsg->k[0] = static_cast(zedParam.fx); + camInfoMsg->k[2] = static_cast(zedParam.cx); + camInfoMsg->k[4] = static_cast(zedParam.fy); + camInfoMsg->k[5] = static_cast(zedParam.cy); + camInfoMsg->k[8] = 1.0; + + // Rectification + camInfoMsg->r.fill(0.0); + for (size_t i = 0; i < 3; i++) { + // identity + camInfoMsg->r[i + i * 3] = 1.0; + } + + // Projection/camera matrix + camInfoMsg->p.fill(0.0); + camInfoMsg->p[0] = static_cast(zedParam.fx); + camInfoMsg->p[2] = static_cast(zedParam.cx); + camInfoMsg->p[5] = static_cast(zedParam.fy); + camInfoMsg->p[6] = static_cast(zedParam.cy); + camInfoMsg->p[10] = 1.0; + + // Image size + camInfoMsg->width = static_cast(_matResol.width); + camInfoMsg->height = static_cast(_matResol.height); + camInfoMsg->header.frame_id = frameId; +} + +void ZedCameraOne::setupCameraInfoMessages() +{ + _camInfoMsg = std::make_shared(); + _camInfoRawMsg = std::make_shared(); + + fillCamInfo(_camInfoMsg, _camOptFrameId, false); + fillCamInfo(_camInfoRawMsg, _camOptFrameId, true); +} + +bool ZedCameraOne::areImageTopicsSubscribed() +{ + _colorSubCount = 0; + _colorRawSubCount = 0; + _graySubCount = 0; + _grayRawSubCount = 0; + + try { + if (_nitrosDisabled) { + _colorSubCount = _pubColorImg.getNumSubscribers(); + _colorRawSubCount = _pubColorRawImg.getNumSubscribers(); + _graySubCount = _pubGrayImg.getNumSubscribers(); + _grayRawSubCount = _pubGrayRawImg.getNumSubscribers(); + } 
else { +#ifdef FOUND_ISAAC_ROS_NITROS + _colorSubCount = count_subscribers(_imgColorTopic) + count_subscribers( + _imgColorTopic + "/nitros"); + _colorRawSubCount = count_subscribers(_imgColorRawTopic) + count_subscribers( + _imgColorRawTopic + "/nitros"); + _graySubCount = count_subscribers(_imgGrayTopic) + + count_subscribers(_imgGrayTopic + "/nitros"); + _grayRawSubCount = count_subscribers(_imgRawGrayTopic) + count_subscribers( + _imgRawGrayTopic + "/nitros"); +#endif + } + } catch (...) { + rcutils_reset_error(); + DEBUG_STREAM_VD("publishImages: Exception while counting subscribers"); + return false; + } + + return (_colorSubCount + _colorRawSubCount + + _graySubCount + _grayRawSubCount + ) > 0; +} + +void ZedCameraOne::handleImageRetrievalAndPublishing() +{ + _imageSubscribed = areImageTopicsSubscribed(); + if (_imageSubscribed) { + DEBUG_STREAM_VD("Retrieving video data"); + + bool gpu = false; +#ifdef FOUND_ISAAC_ROS_NITROS + if (!_nitrosDisabled) { + gpu = true; + } +#endif + retrieveImages(gpu); + publishImages(); + _videoPublishing = true; + } else { + _videoPublishing = false; + + // Publish camera infos even if no video/depth subscribers are present + publishCameraInfos(); + } +} + +void ZedCameraOne::retrieveImages(bool gpu) +{ + bool retrieved = false; + + // ----> Retrieve all required data + DEBUG_VD("Retrieving Image Data"); + if (_colorSubCount > 0) { + retrieved |= + (sl::ERROR_CODE::SUCCESS == + _zed->retrieveImage( + _matColor, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + _24bitMode ? sl::VIEW::LEFT_BGR : sl::VIEW::LEFT_BGRA, +#else + sl::VIEW::LEFT, +#endif + gpu ? 
sl::MEM::GPU : sl::MEM::CPU, _matResol)); + DEBUG_STREAM_VD( + "Color image " << _matResol.width << "x" << _matResol.height << " retrieved - timestamp: " << + _sdkGrabTS.getNanoseconds() << + " nsec"); + } + if (_colorRawSubCount > 0) { + retrieved |= (sl::ERROR_CODE::SUCCESS == + _zed->retrieveImage( + _matColorRaw, +#if (ZED_SDK_MAJOR_VERSION * 10 + ZED_SDK_MINOR_VERSION) >= 51 + _24bitMode ? sl::VIEW::LEFT_UNRECTIFIED_BGR : sl::VIEW::LEFT_UNRECTIFIED_BGRA, +#else + sl::VIEW::LEFT_UNRECTIFIED, +#endif + gpu ? sl::MEM::GPU : sl::MEM::CPU, _matResol)); + DEBUG_STREAM_VD( + "Color raw image " << _matResol.width << "x" << _matResol.height << + " retrieved - timestamp: " << _sdkGrabTS.getNanoseconds() << + " nsec"); + } + if (_graySubCount > 0) { + retrieved |= (sl::ERROR_CODE::SUCCESS == + _zed->retrieveImage( + _matGray, sl::VIEW::LEFT_GRAY, + gpu ? sl::MEM::GPU : sl::MEM::CPU, _matResol)); + DEBUG_STREAM_VD( + "Gray image " << _matResol.width << "x" << _matResol.height << " retrieved - timestamp: " << + _sdkGrabTS.getNanoseconds() << + " nsec"); + } + if (_grayRawSubCount > 0) { + retrieved |= + (sl::ERROR_CODE::SUCCESS == + _zed->retrieveImage( + _matGrayRaw, sl::VIEW::LEFT_UNRECTIFIED_GRAY, + gpu ? 
sl::MEM::GPU : sl::MEM::CPU, _matResol)); + DEBUG_STREAM_VD( + "Gray raw image " << _matResol.width << "x" << _matResol.height << + " retrieved - timestamp: " << _sdkGrabTS.getNanoseconds() << + " nsec"); + } + if (retrieved) { + DEBUG_VD("Image Data retrieved"); + } + // <---- Retrieve all required data +} + +void ZedCameraOne::publishImages() +{ + DEBUG_VD("=== Publish Image topics === "); + sl_tools::StopWatch vdElabTimer(get_clock()); + vdElabTimer.tic(); + + if (_sdkGrabTS.getNanoseconds() == _lastTs_grab.getNanoseconds()) { + DEBUG_VD("publishImages: ignoring not update data"); + DEBUG_STREAM_VD( + "Latest Ts: " << _lastTs_grab.getNanoseconds() + << " - New Ts: " + << _sdkGrabTS.getNanoseconds()); + return; + } + + if (_sdkGrabTS.data_ns != 0) { + double period_sec = + static_cast(_sdkGrabTS.data_ns - _lastTs_grab.data_ns) / 1e9; + DEBUG_STREAM_VD( + "IMAGE PUB LAST PERIOD: " << period_sec << " sec @" + << 1. / period_sec << " Hz"); + + _imagePeriodMean_sec->addValue(period_sec); + DEBUG_STREAM_VD( + "IMAGE PUB MEAN PERIOD: " + << _imagePeriodMean_sec->getAvg() << " sec @" + << 1. 
/ _imagePeriodMean_sec->getAvg() << " Hz"); + } + _lastTs_grab = _sdkGrabTS; + + rclcpp::Time timeStamp; + if (_svoMode) { + timeStamp = _frameTimestamp; + } else { + timeStamp = sl_tools::slTime2Ros(_sdkGrabTS, get_clock()->get_clock_type()); + } + + publishColorImage(timeStamp); + publishColorRawImage(timeStamp); + publishGrayImage(timeStamp); + publishGrayRawImage(timeStamp); + + _imageElabMean_sec->addValue(vdElabTimer.toc()); + + double vd_period_usec = 1e6 / _camGrabFrameRate; + double elapsed_usec = _imgPubFreqTimer.toc() * 1e6; + + if (elapsed_usec < vd_period_usec) { + rclcpp::sleep_for( + std::chrono::microseconds( + static_cast(vd_period_usec - elapsed_usec))); + } + + _imgPubFreqTimer.tic(); + + DEBUG_VD("=== Video and Depth topics published === "); +} + +void ZedCameraOne::publishColorImage(const rclcpp::Time & timeStamp) +{ + if (_colorSubCount > 0) { + DEBUG_STREAM_VD("_colorSubCount: " << _colorSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + _matColor, _pubColorImg, _pubColorImgInfo, _pubColorImgInfoTrans, + _camInfoMsg, _camOptFrameId, timeStamp); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + _matColor, _nitrosPubColorImg, _pubColorImgInfo, _pubColorImgInfoTrans, + _camInfoMsg, _camOptFrameId, timeStamp); +#endif + } + } else { + publishCameraInfo(_pubColorImgInfo, _camInfoMsg, timeStamp); + publishCameraInfo(_pubColorImgInfoTrans, _camInfoMsg, timeStamp); + } +} + +void ZedCameraOne::publishColorRawImage(const rclcpp::Time & timeStamp) +{ + if (_colorRawSubCount > 0) { + DEBUG_STREAM_VD("_colorRawSubCount: " << _colorRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + _matColorRaw, _pubColorRawImg, _pubColorRawImgInfo, + _pubColorRawImgInfoTrans, _camInfoRawMsg, _camOptFrameId, timeStamp); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + _matColorRaw, _nitrosPubColorRawImg, _pubColorRawImgInfo, _pubColorRawImgInfoTrans, + _camInfoRawMsg, _camOptFrameId, timeStamp); +#endif + } + } 
else { + publishCameraInfo(_pubColorRawImgInfo, _camInfoRawMsg, timeStamp); + publishCameraInfo(_pubColorRawImgInfoTrans, _camInfoRawMsg, timeStamp); + } +} + +void ZedCameraOne::publishGrayImage(const rclcpp::Time & timeStamp) +{ + if (_graySubCount > 0) { + DEBUG_STREAM_VD("_graySubCount: " << _graySubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + _matGray, _pubGrayImg, _pubGrayImgInfo, _pubGrayImgInfoTrans, + _camInfoMsg, _camOptFrameId, timeStamp); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + _matGray, _nitrosPubGrayImg, _pubGrayImgInfo, _pubGrayImgInfoTrans, + _camInfoMsg, _camOptFrameId, timeStamp); +#endif + } + } else { + publishCameraInfo(_pubGrayImgInfo, _camInfoMsg, timeStamp); + publishCameraInfo(_pubGrayImgInfoTrans, _camInfoMsg, timeStamp); + } +} + +void ZedCameraOne::publishGrayRawImage(const rclcpp::Time & timeStamp) +{ + if (_grayRawSubCount > 0) { + DEBUG_STREAM_VD("_grayRawSubCount: " << _grayRawSubCount); + if (_nitrosDisabled) { + publishImageWithInfo( + _matGrayRaw, _pubGrayRawImg, _pubGrayRawImgInfo, + _pubGrayRawImgInfoTrans, _camInfoRawMsg, _camOptFrameId, timeStamp); + } else { +#ifdef FOUND_ISAAC_ROS_NITROS + publishImageWithInfo( + _matGrayRaw, _nitrosPubGrayRawImg, _pubGrayRawImgInfo, _pubGrayRawImgInfoTrans, + _camInfoRawMsg, _camOptFrameId, timeStamp); +#endif + } + } else { + publishCameraInfo(_pubGrayRawImgInfo, _camInfoRawMsg, timeStamp); + publishCameraInfo(_pubGrayRawImgInfoTrans, _camInfoRawMsg, timeStamp); + } +} + +void ZedCameraOne::publishCameraInfo( + const camInfoPub & infoPub, + camInfoMsgPtr & camInfoMsg, + const rclcpp::Time & t) +{ + auto ts = _usePubTimestamps ? 
get_clock()->now() : t; + camInfoMsg->header.stamp = ts; + + if (infoPub) { + if (count_subscribers(infoPub->get_topic_name()) > 0) { + infoPub->publish(*camInfoMsg); + DEBUG_STREAM_VD( + " * Camera Info message published: " << infoPub->get_topic_name()); + DEBUG_STREAM_VD(" * Timestamp: " << ts.nanoseconds() << " nsec"); + } + } +} + +void ZedCameraOne::publishImageWithInfo( + const sl::Mat & img, + const image_transport::Publisher & pubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t) +{ + auto image = sl_tools::imageToROSmsg(img, imgFrameId, t, _usePubTimestamps); + DEBUG_STREAM_VD("Publishing IMAGE message: " << t.nanoseconds() << " nsec"); + try { + publishCameraInfo(infoPub, camInfoMsg, image->header.stamp); + publishCameraInfo(infoPubTrans, camInfoMsg, image->header.stamp); + pubImg.publish(std::move(image)); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM("Message publishing exception: " << e.what()); + } catch (...) 
{ + DEBUG_STREAM_COMM("Message publishing generic exception: "); + } +} +#ifdef FOUND_ISAAC_ROS_NITROS +void ZedCameraOne::publishImageWithInfo( + const sl::Mat & img, + const nitrosImgPub & nitrosPubImg, + const camInfoPub & infoPub, + const camInfoPub & infoPubTrans, + camInfoMsgPtr & camInfoMsg, + const std::string & imgFrameId, + const rclcpp::Time & t) +{ + DEBUG_STREAM_VD(" * Publishing NITROS IMAGE message: " << t.nanoseconds() << " nsec"); + try { + size_t dpitch = img.getWidthBytes(); + size_t spitch = img.getStepBytes(sl::MEM::GPU); // SL Mat can be padded + + size_t dbuffer_size{spitch * img.getHeight()}; + void * dbuffer; + CUDA_CHECK(cudaMalloc(&dbuffer, dbuffer_size)); + + DEBUG_NITROS("Sent CUDA Image buffer with memory at: %p", dbuffer); + + // Copy data bytes to CUDA buffer + CUDA_CHECK( + cudaMemcpy2D( + dbuffer, + dpitch, + img.getPtr(sl::MEM::GPU), + spitch, + img.getWidth() * img.getPixelBytes(), img.getHeight(), + cudaMemcpyDeviceToDevice)); + + // Adding header data + std_msgs::msg::Header header; + header.stamp = _usePubTimestamps ? get_clock()->now() : t; + header.frame_id = imgFrameId; + + auto encoding = img_encodings::BGRA8; // Default encoding + if (img.getDataType() == sl::MAT_TYPE::U8_C1) { + encoding = img_encodings::MONO8; // Mono image + } else if (img.getDataType() == sl::MAT_TYPE::U8_C3) { + encoding = img_encodings::BGR8; // BGR image + } else if (img.getDataType() == sl::MAT_TYPE::F32_C1) { + encoding = img_encodings::TYPE_32FC1; // Float image + } + + // Create NitrosImage wrapping CUDA buffer + nvidia::isaac_ros::nitros::NitrosImage nitros_image = + nvidia::isaac_ros::nitros::NitrosImageBuilder() + .WithHeader(header) + .WithEncoding(encoding) + .WithDimensions(img.getHeight(), img.getWidth()) + .WithGpuData(dbuffer) + //.WithGpuData(img.getPtr(sl::MEM::GPU)) // TODO: Enable direct GPU memory sharing when supported by Isaac ROS. 
+ .Build(); + + nitrosPubImg->publish(nitros_image); + publishCameraInfo(infoPub, camInfoMsg, header.stamp); + publishCameraInfo(infoPubTrans, camInfoMsg, header.stamp); + } catch (std::system_error & e) { + DEBUG_STREAM_COMM(" * Message publishing exception: " << e.what()); + } catch (...) { + DEBUG_STREAM_COMM(" * Message publishing generic exception: "); + } +} +#endif + +void ZedCameraOne::applyDynamicSettings() +{ + DEBUG_STREAM_COMM("=== Applying dynamic settings ==="); + + if (_debugCommon) { + DEBUG_COMM("Settings to apply: "); + for (auto & param: _camDynParMapChanged) { + if (param.second) {DEBUG_STREAM_COMM(" * " << param.first);} + } + } + + applySaturationSharpnessGamma(); + applyWhiteBalance(); + applyExposure(); + applyAnalogGain(); + applyDigitalGain(); + applyExposureCompensationAndDenoising(); + + DEBUG_COMM("Settings yet to apply: "); + int count = 0; + for (auto & param: _camDynParMapChanged) { + if (param.second) { + count++; + DEBUG_STREAM_COMM(" * " << param.first); + } + } + if (count == 0) { + DEBUG_COMM(" * NONE"); + } + + _triggerUpdateDynParams = (count > 0); + + DEBUG_STREAM_COMM("=== Dynamic settings applied ==="); +} + +// Helper function for saturation, sharpness, gamma +void ZedCameraOne::applySaturationSharpnessGamma() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + setVideoSetting(sl::VIDEO_SETTINGS::SATURATION, _camSaturation, "video.saturation"); + setVideoSetting(sl::VIDEO_SETTINGS::SHARPNESS, _camSharpness, "video.sharpness"); + 
setVideoSetting(sl::VIDEO_SETTINGS::GAMMA, _camGamma, "video.gamma"); +} + +// Helper function for white balance +void ZedCameraOne::applyWhiteBalance() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + setVideoSetting(sl::VIDEO_SETTINGS::WHITEBALANCE_AUTO, _camAutoWB, "video.auto_whitebalance"); + if (!_camAutoWB) { + setVideoSetting( + sl::VIDEO_SETTINGS::WHITEBALANCE_TEMPERATURE, _camWBTemp * 100, + "video.whitebalance_temperature"); + set_parameter(rclcpp::Parameter("video.auto_whitebalance", false)); + } else { + _camDynParMapChanged["video.whitebalance_temperature"] = false; + } +} + +// Helper function for exposure +void ZedCameraOne::applyExposure() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + auto setVideoSettingRange = [this](sl::VIDEO_SETTINGS setting, int value_min, + int value_max, const std::string & settingName_min, const std::string & settingName_max) { + if (this->_camDynParMapChanged[settingName_min] || + this->_camDynParMapChanged[settingName_max]) + { + sl::ERROR_CODE ret_code = 
_zed->setCameraSettings(setting, value_min, value_max); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting the range of [" + << settingName_min << "," << settingName_max << ": " + << sl::toString(ret_code)); + return false; + } else { + this->_camDynParMapChanged[settingName_min] = false; + this->_camDynParMapChanged[settingName_max] = false; + DEBUG_STREAM_COMM("Set " << settingName_min << " to " << value_min); + DEBUG_STREAM_COMM("Set " << settingName_max << " to " << value_max); + return true; + } + return false; + } + return true; + }; + + if (_camAutoExposure) { + if (_camDynParMapChanged["video.auto_exposure"]) { + set_parameters( + { + rclcpp::Parameter("video.auto_exposure_time_range_min", 28), + rclcpp::Parameter("video.auto_exposure_time_range_max", 30000) + }); + DEBUG_STREAM_COMM("Forced exposure range to [" << 28 << "," << 30000 << "]"); + } + DEBUG_STREAM_COMM( + "Set video.auto_exposure to " + << (_camAutoExposure ? "TRUE" : "FALSE")); + } else { + setVideoSetting( + sl::VIDEO_SETTINGS::EXPOSURE_TIME, _camExpTime, + "video.exposure_time"); + set_parameters( + { + rclcpp::Parameter("video.auto_exposure", false), + rclcpp::Parameter("video.auto_exposure_time_range_min", _camExpTime), + rclcpp::Parameter("video.auto_exposure_time_range_max", _camExpTime) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_exposure to false and exposure range to [" << _camExpTime << "," << + _camExpTime << + "]"); + } + _camDynParMapChanged["video.auto_exposure"] = false; + _camDynParMapChanged["video.exposure_time"] = false; + + if (setVideoSettingRange( + sl::VIDEO_SETTINGS::AUTO_EXPOSURE_TIME_RANGE, + _camAutoExpTimeRangeMin, _camAutoExpTimeRangeMax, + "video.auto_exposure_time_range_min", + "video.auto_exposure_time_range_max")) + { + if (_camAutoExpTimeRangeMin == _camAutoExpTimeRangeMax) { + set_parameters( + { + rclcpp::Parameter("video.auto_exposure", false), + rclcpp::Parameter("video.exposure_time", 
_camAutoExpTimeRangeMin) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_exposure to false and video.exposure_time to " << + _camAutoExpTimeRangeMin); + } else { + set_parameter(rclcpp::Parameter("video.auto_exposure", true)); + DEBUG_STREAM_COMM("Forced video.auto_exposure to true"); + } + } +} + +// Helper function for analog gain +void ZedCameraOne::applyAnalogGain() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + auto setVideoSettingRange = [this](sl::VIDEO_SETTINGS setting, int value_min, + int value_max, const std::string & settingName_min, const std::string & settingName_max) { + if (this->_camDynParMapChanged[settingName_min] || + this->_camDynParMapChanged[settingName_max]) + { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value_min, value_max); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting the range of [" + << settingName_min << "," << settingName_max << ": " + << sl::toString(ret_code)); + return false; + } else { + this->_camDynParMapChanged[settingName_min] = false; + this->_camDynParMapChanged[settingName_max] = false; + DEBUG_STREAM_COMM("Set " << settingName_min << " to " << value_min); + DEBUG_STREAM_COMM("Set " << settingName_max << " to " << value_max); + return true; + } + return false; + } + return true; + }; + + if (_camAutoAnalogGain) { + if (_camDynParMapChanged["video.auto_analog_gain"]) { + set_parameters( + { + rclcpp::Parameter("video.auto_analog_gain_range_min", 1000), + 
rclcpp::Parameter("video.auto_analog_gain_range_max", 16000) + }); + DEBUG_STREAM_COMM("Forced analog gain range to [" << 1000 << "," << 16000 << "]"); + } + DEBUG_STREAM_COMM( + "Set video.auto_analog_gain to " + << (_camAutoAnalogGain ? "TRUE" : "FALSE")); + } else { + setVideoSetting( + sl::VIDEO_SETTINGS::ANALOG_GAIN, _camAnalogGain, "video.analog_gain"); + set_parameters( + { + rclcpp::Parameter("video.auto_analog_gain", false), + rclcpp::Parameter("video.auto_analog_gain_range_min", _camAnalogGain), + rclcpp::Parameter("video.auto_analog_gain_range_max", _camAnalogGain) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_analog_gain to false and analog gain range to [" << _camAnalogGain << + "," << _camAnalogGain << + "]"); + } + _camDynParMapChanged["video.auto_analog_gain"] = false; + _camDynParMapChanged["video.analog_gain"] = false; + + if (setVideoSettingRange( + sl::VIDEO_SETTINGS::AUTO_ANALOG_GAIN_RANGE, + _camAutoAnalogGainRangeMin, _camAutoAnalogGainRangeMax, + "video.auto_analog_gain_range_min", + "video.auto_analog_gain_range_max")) + { + if (_camAutoAnalogGainRangeMin == _camAutoAnalogGainRangeMax) { + set_parameters( + { + rclcpp::Parameter("video.auto_analog_gain", false), + rclcpp::Parameter("video.analog_gain", _camAutoAnalogGainRangeMin) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_analog_gain to false and video.analog_gain to " << + _camAutoAnalogGainRangeMin); + } else { + set_parameter(rclcpp::Parameter("video.auto_analog_gain", true)); + DEBUG_STREAM_COMM("Forced video.auto_analog_gain to true"); + } + } +} + +// Helper function for digital gain +void ZedCameraOne::applyDigitalGain() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << 
sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + auto setVideoSettingRange = [this](sl::VIDEO_SETTINGS setting, int value_min, + int value_max, const std::string & settingName_min, const std::string & settingName_max) { + if (this->_camDynParMapChanged[settingName_min] || + this->_camDynParMapChanged[settingName_max]) + { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value_min, value_max); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting the range of [" + << settingName_min << "," << settingName_max << ": " + << sl::toString(ret_code)); + return false; + } else { + this->_camDynParMapChanged[settingName_min] = false; + this->_camDynParMapChanged[settingName_max] = false; + DEBUG_STREAM_COMM("Set " << settingName_min << " to " << value_min); + DEBUG_STREAM_COMM("Set " << settingName_max << " to " << value_max); + return true; + } + return false; + } + return true; + }; + + if (_camAutoDigitalGain) { + if (_camDynParMapChanged["video.auto_digital_gain"]) { + set_parameters( + { + rclcpp::Parameter("video.auto_digital_gain_range_min", 1), + rclcpp::Parameter("video.auto_digital_gain_range_max", 256) + }); + DEBUG_STREAM_COMM("Forced digital gain range to [" << 1 << "," << 256 << "]"); + } + DEBUG_STREAM_COMM( + "Set video.auto_digital_gain to " + << (_camAutoDigitalGain ? 
"TRUE" : "FALSE")); + } else { + setVideoSetting( + sl::VIDEO_SETTINGS::DIGITAL_GAIN, _camDigitalGain, "video.digital_gain"); + set_parameters( + { + rclcpp::Parameter("video.auto_digital_gain", false), + rclcpp::Parameter("video.auto_digital_gain_range_min", _camDigitalGain), + rclcpp::Parameter("video.auto_digital_gain_range_max", _camDigitalGain) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_digital_gain to false and digital gain range to [" << _camDigitalGain << + "," << _camDigitalGain << + "]"); + } + _camDynParMapChanged["video.auto_digital_gain"] = false; + _camDynParMapChanged["video.digital_gain"] = false; + + if (setVideoSettingRange( + sl::VIDEO_SETTINGS::AUTO_DIGITAL_GAIN_RANGE, + _camAutoDigitalGainRangeMin, _camAutoDigitalGainRangeMax, + "video.auto_digital_gain_range_min", + "video.auto_digital_gain_range_max")) + { + if (_camAutoDigitalGainRangeMin == _camAutoDigitalGainRangeMax) { + set_parameters( + { + rclcpp::Parameter("video.auto_digital_gain", false), + rclcpp::Parameter("video.digital_gain", _camAutoDigitalGainRangeMin) + }); + DEBUG_STREAM_COMM( + "Forced video.auto_digital_gain to false and video.digital_gain to " << + _camAutoDigitalGainRangeMin); + } else { + set_parameter(rclcpp::Parameter("video.auto_digital_gain", true)); + DEBUG_STREAM_COMM("Forced video.auto_digital_gain to true"); + } + } +} + +// Helper function for exposure compensation and denoising +void ZedCameraOne::applyExposureCompensationAndDenoising() +{ + auto setVideoSetting = [this](sl::VIDEO_SETTINGS setting, int value, + const std::string & settingName) { + if (this->_camDynParMapChanged[settingName]) { + sl::ERROR_CODE ret_code = _zed->setCameraSettings(setting, value); + if (ret_code != sl::ERROR_CODE::SUCCESS) { + RCLCPP_WARN_STREAM( + get_logger(), "Error setting " + << settingName << ": " + << sl::toString(ret_code)); + } else { + this->_camDynParMapChanged[settingName] = false; + DEBUG_STREAM_COMM("Set " << settingName << " to " << value); + } + } + }; + + 
setVideoSetting( + sl::VIDEO_SETTINGS::EXPOSURE_COMPENSATION, _camExposureComp, + "video.exposure_compensation"); + + setVideoSetting(sl::VIDEO_SETTINGS::DENOISING, _camDenoising, "video.denoising"); +} + +void ZedCameraOne::publishCameraInfos() +{ + rclcpp::Time pub_ts = get_clock()->now(); + + publishCameraInfo(_pubColorImgInfo, _camInfoMsg, pub_ts); + publishCameraInfo(_pubColorRawImgInfo, _camInfoRawMsg, pub_ts); + publishCameraInfo(_pubGrayImgInfo, _camInfoMsg, pub_ts); + publishCameraInfo(_pubGrayRawImgInfo, _camInfoRawMsg, pub_ts); + publishCameraInfo(_pubColorImgInfoTrans, _camInfoMsg, pub_ts); + publishCameraInfo(_pubColorRawImgInfoTrans, _camInfoRawMsg, pub_ts); + publishCameraInfo(_pubGrayImgInfoTrans, _camInfoMsg, pub_ts); + publishCameraInfo(_pubGrayRawImgInfoTrans, _camInfoRawMsg, pub_ts); +} + +} diff --git a/src/lib/zed-ros2-wrapper/zed_debug/CMakeLists.txt b/src/lib/zed-ros2-wrapper/zed_debug/CMakeLists.txt new file mode 100644 index 000000000..52a131f59 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/CMakeLists.txt @@ -0,0 +1,163 @@ +cmake_minimum_required(VERSION 3.8) +project(zed_debug) + +################################################ +## Generate symbols for IDE indexer (VSCode) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +################################################ +# Check the ROS2 version + +set(ROS2_FOUND FALSE) +if(DEFINED ENV{ROS_DISTRO}) + set(FOUND_ROS2_DISTRO $ENV{ROS_DISTRO}) + set(ROS2_FOUND TRUE) + #message("* Found ROS2 ${FOUND_ROS2_DISTRO}") +else() + message("* ROS2 distro variable not set. 
Trying to figure it out...") + set(ROS2_DISTROS "ardent;bouncy;crystal;dashing;eloquent;foxy;galactic;humble;iron;jazzy;kilted;rolling") + set(ROS2_FOUND FALSE) + foreach(distro ${ROS2_DISTROS}) + if(NOT ROS2_FOUND) + find_path(RCLCPP_H rclcpp.hpp PATHS /opt/ros/${distro}/include/rclcpp) + if(RCLCPP_H) + #message("* Found ROS2 ${distro}") + set(FOUND_ROS2_DISTRO ${distro}) + set(ROS2_FOUND TRUE) + endif() + endif() + endforeach() +endif() + +if(ROS2_FOUND) + if(${FOUND_ROS2_DISTRO} STREQUAL "foxy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_FOXY) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "humble") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_HUMBLE) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "iron") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_IRON) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "jazzy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_JAZZY) + else() + message("*** WARNING *** Unsupported ROS2 ${FOUND_ROS2_DISTRO}. '${PROJECT_NAME}' may not work correctly.") + endif() +else() + message("*** WARNING *** ROS2 distro is unknown. 
This package could not work correctly.") +endif() +################################################ + +# Default to C99 +if(NOT CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99) +endif() + +# Default to C++17 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) + #message(" * Release with Debug Info Mode") + add_compile_options(-Wno-deprecated-declarations) +endif() + +if(CMAKE_BUILD_TYPE MATCHES Debug) + message(" * Debug Mode") +endif() + +############################################# +# Dependencies +find_package(ZED REQUIRED) + +# Detect the host processor to spot Jetson boards. exec_program() is deprecated since CMake 3.0; +# execute_process() with OUTPUT_STRIP_TRAILING_WHITESPACE reproduces its trailing-newline trimming. +execute_process(COMMAND uname -p OUTPUT_VARIABLE CMAKE_SYSTEM_NAME2 OUTPUT_STRIP_TRAILING_WHITESPACE) +if(CMAKE_SYSTEM_NAME2 MATCHES "aarch64") # Jetson TX + set(CUDA_USE_STATIC_CUDA_RUNTIME OFF) +endif() + +find_package(CUDA REQUIRED) + +set(DEPENDENCIES + rclcpp + rclcpp_components + zed_components + backward_ros +) + +find_package(ament_cmake_auto REQUIRED) +ament_auto_find_build_dependencies() + +find_package(rcutils REQUIRED) +find_package(rclcpp REQUIRED) +find_package(rclcpp_components REQUIRED) +find_package(zed_components REQUIRED) + +# Debugging with backward-cpp +find_package(backward_ros REQUIRED) + +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + ament_lint_auto_find_test_dependencies() +endif() + +############################################################################### +#Add all files in subdirectories of the project in +# a dummy_target so qtcreator have access to all files +file(GLOB_RECURSE all_files ${CMAKE_SOURCE_DIR}/*) +add_custom_target(all_${PROJECT_NAME}_files SOURCES ${all_files}) + +############################################################################### +# LIBS + +link_directories(${ZED_LIBRARY_DIR}) +link_directories(${CUDA_LIBRARY_DIRS}) + +set(ZED_LIBS + ${ZED_LIBRARIES} + ${CUDA_LIBRARIES} +) + +############################################################################### +# SOURCES +set(ZED_DEBUG_PROC + ${CMAKE_CURRENT_SOURCE_DIR}/src/zed_debug_proc.cpp +) + 
+############################################################################### +# Bin and Install + +# ZED DEBUG PROC executable +add_executable(zed_debug_proc + ${ZED_DEBUG_PROC} +) +target_include_directories(zed_debug_proc PUBLIC + ${CUDA_INCLUDE_DIRS} + ${ZED_INCLUDE_DIRS} +) +target_link_directories(zed_debug_proc PUBLIC + ${CUDA_LIBRARY_DIRS} +) +target_link_libraries(zed_debug_proc + ${CUDA_LIBRARIES} +) +ament_target_dependencies(zed_debug_proc + ${DEPENDENCIES} +) + +# Install executable +install(TARGETS zed_debug_proc + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION lib/${PROJECT_NAME} +) + +# Install LAUNCH and CONFIG files +install(DIRECTORY + launch config + DESTINATION share/${PROJECT_NAME} +) + +ament_package() diff --git a/src/lib/zed-ros2-wrapper/zed_debug/README.md b/src/lib/zed-ros2-wrapper/zed_debug/README.md new file mode 100644 index 000000000..a151a97a5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/README.md @@ -0,0 +1,97 @@ +# ZED ROS2 Wrapper Debugging + +## Build with Debug Symbols + +Go to the root of your ROS2 workspace. + +Clean the previous build if they do not contain debug symbols: + +```bash +rm -rf install +rm -rf build +rm -rf logs +``` + +Build the ZED ROS2 wrapper with debug symbols: + +```bash +colcon build --symlink-install --cmake-args=-DCMAKE_BUILD_TYPE=Debug --parallel-workers $(nproc) +``` + +or use `RelWithDebInfo` build type for optimized builds with debug symbols (normally required by the ZED SDK): + +```bash +colcon build --symlink-install --cmake-args=-DCMAKE_BUILD_TYPE=RelWithDebInfo --parallel-workers $(nproc) +``` + +## Debug with VSCode + +It's possible to debug the ZED ROS2 nodes using VSCode and the `ros2 launch` command with a `gdbserver` prefix. + +### Setup VSCode launch configuration + +1) Open VSCode on your workspace. +2) Go to your side bar, 'Run and Debug' section. +3) Add a new configuration by clicking on 'create a launch.json file'. +4) Select 'C++ (GDB/LLDB)'. 
+5) Replace the content of the generated `launch.json` file with the following: + +```json +{ + "version": "0.2.0", + "configurations": [ + { + "name": "C++ Debugger", + "request": "launch", + "type": "cppdbg", + "miDebuggerServerAddress": "localhost:3000", + "cwd": "${workspaceFolder}", + "program": "install/zed_debug/lib/zed_debug/zed_debug_proc", + "stopAtEntry": true + } + ] +} +``` + +with `"stopAtEntry": true` the node will stop at the beginning of the `main` function. This allows you to set breakpoints before the execution continues. + +Learn more about VSCode debugging configuration: + +* [Debug C++ in Visual Studio Code](https://code.visualstudio.com/docs/cpp/cpp-debug) +* [Configure C/C++ debugging](https://code.visualstudio.com/docs/cpp/launch-json-reference) + +### Start the node with gdbserver + +The `zed_camera_debug.launch.py` launch file has been modified to accept a `cmd_prefix` argument that allows you to add a prefix to the node executable. + +Run the launch file with the `gdbserver` prefix: + +```bash +ros2 launch zed_debug zed_camera_debug.launch.py camera_model:= cmd_prefix:='gdbserver localhost:3000' +``` + +## Debug with `GDB` + +You can also debug the ZED ROS2 nodes using `gdb` directly from the command line. + +```bash +ros2 launch zed_debug zed_camera_debug.launch.py camera_model:= cmd_prefix:='gdb --args' +``` + +You can modify `gdb --args` to add any other `gdb` options you may need. + +## Debug with `Valgrind` + +You can also run the ZED ROS2 nodes with `valgrind` to check for memory leaks and other memory-related issues. + +Run the launch file with the `valgrind` prefix: + +```bash +ros2 launch zed_debug zed_camera_debug.launch.py camera_model:= cmd_prefix:='valgrind --leak-check=full --track-origins=yes' +``` + +You can modify `valgrind --leak-check=full --track-origins=yes` to add any other `valgrind` options you may need. 
+ +## Known Issues + +When the `isaac_ros_managed_nitros` package is installed and the ZED Camera Components are linked against Isaac ROS libraries, you may experience issues when trying to start the `zed_debug` executable. This appears to be an issue with the Isaac ROS libraries when used with static ROS 2 Composition instead of dynamic composition (using launch files). We are working with NVIDIA to resolve this issue. In the meantime, if you encounter this problem and want to debug the ZED Components, please uninstall the `isaac_ros_managed_nitros` package from your system and rebuild. diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/common_mono.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/common_mono.yaml new file mode 100644 index 000000000..ae9f0e21e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/common_mono.yaml @@ -0,0 +1,84 @@ +# config/common_mono.yaml +# Common parameters to Stereolabs ZED Stereo cameras + +--- +/**: + ros__parameters: + general: + serial_number: 0 # overwritten by launch file + pub_resolution: "CUSTOM" # The resolution used for output. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factory to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + gpu_id: -1 + optional_opencv_calibration_file: "" # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + publish_status: true # Enable/Disable the publishing of the camera status topic + + svo: + decryption_key: "" # Optional decryption key/passphrase when opening encrypted SVO files + use_svo_timestamps: true # Use the SVO timestamps to publish data. 
If false, data will be published at the system time. + publish_svo_clock: true # [overwritten by launch file options] When use_svo_timestamps is true allows to publish the SVO clock to the `/clock` topic. This is useful for synchronous rosbag playback. + svo_loop: false # Enable loop mode when using an SVO as input source. NOTE: ignored if SVO timestamping is used + svo_realtime: true # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + + video: + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + auto_exposure: true # [DYNAMIC] - Enables or disables auto exposure control. false: manual, true: auto + exposure_time: 16000 # [DYNAMIC] - Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # [DYNAMIC] - Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # [DYNAMIC] - Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # [DYNAMIC] - Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + auto_analog_gain: true # [DYNAMIC] - Enables or disables auto gain control. false: manual, true: auto + analog_gain: 1255 # [DYNAMIC] - Defines the real analog gain (sensor) in mDB. Range [1000-16000]. 
Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # [DYNAMIC] - Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # [DYNAMIC] - Defines the maximum range of sensor gain in automatic control + auto_digital_gain: false # [DYNAMIC] - Enables or disables auto digital gain control. false: manual, true: auto + digital_gain: 1 # [DYNAMIC] - Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # [DYNAMIC] - Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # [DYNAMIC] - Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # [DYNAMIC] - Defines the level of denoising applied on both left and right images. Range [0-100] + enable_24bit_output: false # [SDK >= 5.1] Enable BGR 24-bit output for lower bandwidth usage. If false, BGRA 32-bit output is used (old way before SDK v5.1) + publish_rgb: true # Advertise the RGB image topics that are published only if a node subscribes to them + publish_gray: false # Advertise the gray image topics that are published only if a node subscribes to them + publish_raw: false # Advertise the raw image topics that are published only if a node subscribes to them (only if 'publish_rgb' or 'publish_gray' is true) + + sensors: + publish_imu_tf: true # [overwritten by launch file options] enable/disable the IMU TF broadcasting + sensors_pub_rate: 100. # [DYNAMIC] - frequency of publishing of sensors data. MAX: 400. 
- MIN: grab rate + publish_imu: true # Advertise the IMU topic that is published only if a node subscribes to it + publish_imu_raw: false # Advertise the raw IMU topic that is published only if a node subscribes to it + publish_cam_imu_transf: false # Advertise the CAMERA-IMU transformation topic that is published only if a node subscribes to it + publish_temp: false # Advertise the temperature topics that are published only if a node subscribes to them + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: 'H264' # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. 
+ + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + thread_sched_policy: "SCHED_BATCH" # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: '' # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. + use_pub_timestamps: false # Use the current ROS time for the message timestamp instead of the camera timestamp. This is useful to test data communication latency. + debug_common: false + debug_dyn_params: false + debug_video_depth: false + debug_camera_controls: false + debug_sensors: false + debug_streaming: false + debug_advanced: false + debug_nitros: false + disable_nitros: false # If available, disable NITROS usage for debugging and testing purposes diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/common_stereo.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/common_stereo.yaml new file mode 100644 index 000000000..c45eb95d3 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/common_stereo.yaml @@ -0,0 +1,220 @@ +# config/common_stereo.yaml +# Common parameters to Stereolabs ZED Stereo cameras + +--- +/**: + ros__parameters: + use_sim_time: false # Set to `true` only if there is a publisher for the simulated clock to the `/clock` topic. Normally used in simulation mode. + + simulation: + sim_enabled: false # Set to `true` to enable the simulation mode and connect to a simulation server + sim_address: '127.0.0.1' # The connection address of the simulation server. 
See the documentation of the supported simulation plugins for more information. + sim_port: 30000 # The connection port of the simulation server. See the documentation of the supported simulation plugins for more information. + + svo: + decryption_key: "" # Optional decryption key/passphrase when opening encrypted SVO files + use_svo_timestamps: true # Use the SVO timestamps to publish data. If false, data will be published at the system time. + publish_svo_clock: false # [overwritten by launch file options] When use_svo_timestamps is true allows to publish the SVO clock to the `/clock` topic. This is useful for synchronous rosbag playback. + svo_loop: false # Enable loop mode when using an SVO as input source. NOTE: ignored if SVO timestamping is used + svo_realtime: false # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + replay_rate: 1.0 # Replay rate for the SVO when not used in realtime mode (between [0.10-5.0]) + + general: + camera_timeout_sec: 5 + camera_max_reconnect: 5 + camera_flip: false + self_calib: true # Enable the self-calibration process at camera opening. See https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#affeaa06cfc1d849e311e484ceb8edcc5 + serial_number: 0 # overwritten by launch file + pub_resolution: 'CUSTOM' # The resolution used for image and depth map publishing. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factor to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + pub_frame_rate: 15.0 # [DYNAMIC] Frequency of publishing of visual images and depth data (not the Point Cloud, see 'depth.point_cloud_freq'). 
This value must be equal or less than the camera framerate. + enable_image_validity_check: 1 # [SDK5 required] Sets the image validity check. If set to 1, the SDK will check if the frames are valid before processing. + gpu_id: -1 + optional_opencv_calibration_file: '' # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + async_image_retrieval: false # If set to true will camera image retrieve at a framerate different from \ref grab() application framerate. This is useful for recording SVO or sending camera stream at different rate than application. + publish_status: true # Advertise the status topics that are published only if a node subscribes to them + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + video: + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_exposure_gain: true # [DYNAMIC] + exposure: 80 # [DYNAMIC] + gain: 80 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + enable_24bit_output: false # [SDK >= 5.1] Enable BGR 24-bit output for lower bandwidth usage. 
If false, BGRA 32-bit output is used (old way before SDK v5.1) + publish_rgb: true # Advertise the RGB image topics that are published only if a node subscribes to them + publish_left_right: false # Advertise the left and right image topics that are published only if a node subscribes to them + publish_raw: false # Advertise the raw image topics that are published only if a node subscribes to them + publish_gray: false # Advertise the gray image topics that are published only if a node subscribes to them + publish_stereo: false # Advertise the stereo image topic that is published only if a node subscribes to it + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + sensors: + publish_imu_tf: false # [overwritten by launch file options] enable/disable the IMU TF broadcasting + sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message + sensors_pub_rate: 100. # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate + publish_imu: false # Advertise the IMU topic that is published only if a node subscribes to it + publish_imu_raw: false # Advertise the raw IMU topic that is published only if a node subscribes to it + publish_cam_imu_transf: false # Advertise the CAMERA-IMU transformation topic that is published only if a node subscribes to it + publish_mag: false # Advertise the magnetometer topic that is published only if a node subscribes to it + publish_baro: false # Advertise the barometer topic that is published only if a node subscribes to it + publish_temp: false # Advertise the temperature topics that are published only if a node subscribes to them + + region_of_interest: + automatic_roi: false # Enable the automatic ROI generation to automatically detect part of the robot in the FoV and remove them from the processing. 
Note: if enabled the value of `manual_polygon` is ignored + depth_far_threshold_meters: 2.5 # Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + image_height_ratio_cutoff: 0.5 # By default consider only the lower half of the image, can be useful to filter out the sky + #manual_polygon: '[]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.33],[0.75,0.33],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.25],[0.75,0.25],[0.75,0.75],[0.25,0.75]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + apply_to_depth: true # Apply ROI to depth processing + apply_to_positional_tracking: true # Apply ROI to positional tracking processing + apply_to_object_detection: true # Apply ROI to object detection processing + apply_to_body_tracking: true # Apply ROI to body tracking processing + apply_to_spatial_mapping: true # Apply ROI to spatial mapping processing + publish_roi_mask: false # Advertise the ROI mask image topic that is published only if a node subscribes to it + + depth: + depth_mode: 'NEURAL_LIGHT' # Matches the ZED SDK setting: 'NONE', 'NEURAL_LIGHT', 'NEURAL', 'NEURAL_PLUS' - Note: if 'NONE' all the modules that requires depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...) 
+ depth_stabilization: 30 # Forces positional tracking to start if major than 0 - Range: [0,100] + openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters] + point_cloud_freq: 10.0 # [DYNAMIC] Frequency of the pointcloud publishing. This value must be equal or less than the camera framerate. + point_cloud_res: 'COMPACT' # The resolution used for point cloud publishing - 'COMPACT'-Standard resolution. Optimizes processing and bandwidth, 'REDUCED'-Half resolution. Low processing and bandwidth requirements + depth_confidence: 95 # [DYNAMIC] + depth_texture_conf: 100 # [DYNAMIC] + remove_saturated_areas: true # [DYNAMIC] + publish_depth_map: true # Advertise the depth map topics that are published only if a node subscribes to them + publish_depth_info: false # Advertise the depth info topic that is published only if a node subscribes to it + publish_point_cloud: true # Advertise the point cloud topic that is published only if a node subscribes to it + publish_depth_confidence: false # Advertise the depth confidence topic that is published only if a node subscribes to it + publish_disparity: false # Advertise the disparity topic that is published only if a node subscribes to it + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + pos_tracking: + pos_tracking_enabled: true # True to enable positional tracking from start + pos_tracking_mode: 'GEN_3' # Matches the ZED SDK setting: 'GEN_1', 'GEN_2', 'GEN_3' + imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used. 
+ publish_tf: true # [overwritten by launch file options] publish `odom -> camera_link` TF + publish_map_tf: true # [overwritten by launch file options] publish `map -> odom` TF + map_frame: 'map' + odometry_frame: 'odom' + area_memory: true # Enable to detect loop closure + area_file_path: '' # Path to the area memory file for relocalization and loop closure in a previously explored environment. + enable_localization_only: false # If true, the camera will only localize in the loaded area memory without updating the map with new information. + save_area_memory_on_closing: false # Save Area memory before closing the camera if `area_file_path` is not empty. You can also use the `save_area_memory` service to save the area memory at any time. + reset_odom_with_loop_closure: true # Re-initialize odometry to the last valid pose when loop closure happens (reset camera odometry drift) + publish_3d_landmarks: false # Publish 3D landmarks used by the positional tracking algorithm + publish_lm_skip_frame: 5 # Publish the landmarks every X frames to reduce bandwidth. Set to 0 to publish all landmarks + depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camera from the visual odometry evaluation + set_as_static: false # If 'true' the camera will be static and not move in the environment + set_gravity_as_origin: true # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose. + floor_alignment: false # Enable to automatically calculate camera/floor offset + initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `camera_link` frame in the map -> [X, Y, Z, R, P, Y] + path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency + path_max_count: -1 # use '-1' for unlimited path size + two_d_mode: false # Force navigation on a plane. 
If true the Z value will be fixed to 'fixed_z_value', roll and pitch to zero + fixed_z_value: 0.0 # Value to be used for Z coordinate if `two_d_mode` is true + transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->camera_link` transform being generated + reset_pose_with_svo_loop: true # Reset the camera pose the `initial_base_pose` when the SVO loop is enabled and the SVO playback reaches the end of the file. + publish_odom_pose: true # Advertise the odometry and pose topics that are published only if a node subscribes to them + publish_pose_cov: false # Advertise the pose with covariance topic that is published only if a node subscribes to it + publish_cam_path: false # Advertise the camera odometry and pose path topics that are published only if a node subscribes to them + + gnss_fusion: + gnss_fusion_enabled: false # fuse 'sensor_msg/NavSatFix' message information into pose data + gnss_fix_topic: '/fix' # Name of the GNSS topic of type NavSatFix to subscribe [Default: '/gps/fix'] + gnss_zero_altitude: false # Set to `true` to ignore GNSS altitude information + h_covariance_mul: 1.0 # Multiplier factor to be applied to horizontal covariance of the received fix (plane X/Y) + v_covariance_mul: 1.0 # Multiplier factor to be applied to vertical covariance of the received fix (Z axis) + publish_utm_tf: true # Publish `utm` -> `map` TF + broadcast_utm_transform_as_parent_frame: false # if 'true' publish `utm` -> `map` TF, otherwise `map` -> `utm` + enable_reinitialization: false # determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. It becomes particularly crucial during prolonged GNSS signal loss scenarios. + enable_rolling_calibration: true # If this parameter is set to true, the fusion algorithm will used a rough VIO / GNSS calibration at first and then refine it. This allow you to quickly get a fused position. 
+ enable_translation_uncertainty_target: false # When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter. + gnss_vio_reinit_threshold: 5.0 # determines the threshold for GNSS/VIO reinitialization. If the fused position deviates out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered. + target_translation_uncertainty: 0.1 # defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. By default, the threshold is set at 10 centimeters. + target_yaw_uncertainty: 0.1 # defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. The unit of this parameter is in radian. By default, the threshold is set at 0.1 radians. + + mapping: + mapping_enabled: false # True to enable mapping and fused point cloud publication + resolution: 0.05 # maps resolution in meters [min: 0.01f - max: 0.2f] + max_mapping_range: 5.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0] + fused_pointcloud_freq: 1.0 # frequency of the publishing of the fused colored point cloud + clicked_point_topic: '/clicked_point' # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection + pd_max_distance_threshold: 0.15 # Plane detection: controls the spread of plane by checking the position difference. + pd_normal_similarity_threshold: 15.0 # Plane detection: controls the spread of plane by checking the angle difference. 
+ publish_det_plane: false # Advertise the plane detection topics that is published only if a node subscribes to it + + object_detection: + od_enabled: false # True to enable Object Detection + enable_tracking: true # Whether the object detection system includes object tracking capabilities across a sequence of images. + detection_model: 'MULTI_CLASS_BOX_FAST' # 'MULTI_CLASS_BOX_FAST', 'MULTI_CLASS_BOX_MEDIUM', 'MULTI_CLASS_BOX_ACCURATE', 'PERSON_HEAD_BOX_FAST', 'PERSON_HEAD_BOX_ACCURATE', 'CUSTOM_YOLOLIKE_BOX_OBJECTS' + max_range: 20.0 # [m] Upper depth range for detections.The value cannot be greater than 'depth.max_depth' + filtering_mode: 'NMS3D' # Filtering mode that should be applied to raw detections: 'NONE', 'NMS3D', 'NMS3D_PER_CLASS' + prediction_timeout: 2.0 # During this time [sec], the object will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + # Other parameters are defined in the 'object_detection.yaml' and 'custom_object_detection.yaml' files + + body_tracking: + bt_enabled: false # True to enable Body Tracking + model: 'HUMAN_BODY_MEDIUM' # 'HUMAN_BODY_FAST', 'HUMAN_BODY_MEDIUM', 'HUMAN_BODY_ACCURATE' + body_format: 'BODY_38' # 'BODY_18','BODY_34','BODY_38' + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 15.0 # [m] Defines a upper depth range for detections + body_kp_selection: 'FULL' # 'FULL', 'UPPER_BODY' + enable_body_fitting: false # Defines if the body fitting will be applied + enable_tracking: true # Defines if the object detection will track objects across images flow + prediction_timeout_s: 0.5 # During this time [sec], the skeleton will have OK state even if it is not detected. 
Set this parameter to 0 to disable SDK predictions + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of skeleton key points [0,99] + minimum_keypoints_threshold: 5 # [DYNAMIC] - Minimum number of skeleton key points to be detected for a valid skeleton + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: 'H264' # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. 
+ + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + change_thread_priority: false # Enable changing thread priority and scheduling policy + thread_sched_policy: 'SCHED_BATCH' # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) -> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) -> 99 (HIGH)] - NOTE: 'sudo' required + thread_pointcloud_priority: 60 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) -> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: '' # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. + use_pub_timestamps: false # Use the current ROS time for the message timestamp instead of the camera timestamp. This is useful to test data communication latency. 
+ debug_common: false + debug_dyn_params: false + debug_grab: false + debug_sim: false + debug_video_depth: false + debug_camera_controls: false + debug_point_cloud: false + debug_tf: false + debug_positional_tracking: false + debug_gnss: false + debug_sensors: false + debug_mapping: false + debug_terrain_mapping: false + debug_object_detection: false + debug_body_tracking: false + debug_roi: false + debug_streaming: false + debug_advanced: false + debug_nitros: false + disable_nitros: false # If available, disable NITROS usage for debugging and testing purposes diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/custom_object_detection.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/custom_object_detection.yaml new file mode 100644 index 000000000..daacde19e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/custom_object_detection.yaml @@ -0,0 +1,1623 @@ +# Example of a custom object detection configuration file for the ZED ROS2 wrapper +# This file is used to configure the CUSTOM object detection parameters for the ZED camera. +# The parameters are defined in a YAML format and can be modified to suit the user's needs. +# The parameters are divided into sections for each CUSTOM object class, with each section containing +# specific parameters for that class. + +# This is an example of a configuration file for a custom object detection model trained on the COCO dataset. +# COCO 2017 dataset https://cocodataset.org by Microsoft + +# To create a custom ONNX model, you can follow the online documentation: https://www.stereolabs.com/docs/yolo/export + +/**: + ros__parameters: + object_detection: + custom_onnx_file: '' # Path to the YOLO-like ONNX file for custom object detection directly performed by the ZED SDK + custom_onnx_input_size: 512 # Resolution used with the YOLO-like ONNX file. For example, 512 means an input tensor '1x3x512x512' + + custom_class_count: 80 # Number of classes in the custom ONNX file. 
For example, 80 for YOLOv8 trained on COCO dataset + + # TODO: Add one instance of each class to the list below + # Note: create a class_XXX identifier for each class in the custom ONNX file. + # Note: XXX is a number from 000 to 'custom_class_count-1', and it must be unique for each class. + # Note: the class_XXX identifier is not required to match the class ID [model_class_id] in the custom ONNX file. + + class_000: + label: 'person' + model_class_id: 0 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_001: + label: 'bicycle' + model_class_id: 1 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_002: + label: 'car' + model_class_id: 2 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_003: + label: 'motorcycle' + model_class_id: 3 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_004: + label: 'airplane' + model_class_id: 4 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_005: + label: 'bus' + model_class_id: 5 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_006: + label: 'train' + model_class_id: 6 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_007: + label: 'truck' + model_class_id: 7 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_008: + label: 'boat' + model_class_id: 8 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_009: + label: 'traffic light' + model_class_id: 9 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_010: + label: 'fire hydrant' + model_class_id: 10 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_011: + label: 'stop sign' + model_class_id: 11 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_012: + label: 'parking meter' + model_class_id: 12 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_013: + label: 'bench' + model_class_id: 13 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_014: + label: 'bird' + model_class_id: 14 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_015: + label: 'cat' + model_class_id: 15 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_016: + label: 'dog' + model_class_id: 16 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_017: + label: 'horse' + model_class_id: 17 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_018: + label: 'sheep' + model_class_id: 18 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_019: + label: 'cow' + model_class_id: 19 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_020: + label: 'elephant' + model_class_id: 20 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_021: + label: 'bear' + model_class_id: 21 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_022: + label: 'zebra' + model_class_id: 22 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_023: + label: 'giraffe' + model_class_id: 23 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_024: + label: 'backpack' + model_class_id: 24 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_025: + label: 'umbrella' + model_class_id: 25 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_026: + label: 'handbag' + model_class_id: 26 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_027: + label: 'tie' + model_class_id: 27 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_028: + label: 'suitcase' + model_class_id: 28 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_029: + label: 'frisbee' + model_class_id: 29 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_030: + label: 'skis' + model_class_id: 30 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_031: + label: 'snowboard' + model_class_id: 31 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
# Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_032:
  label: 'sports ball'
  model_class_id: 32 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_033:
  label: 'kite'
  model_class_id: 33 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_034:
  label: 'baseball bat'
  model_class_id: 34 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_035:
  label: 'baseball glove'
  model_class_id: 35 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_036:
  label: 'skateboard'
  model_class_id: 36 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_037:
  label: 'surfboard'
  model_class_id: 37 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_038:
  label: 'tennis racket'
  model_class_id: 38 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_039:
  label: 'bottle'
  model_class_id: 39 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_040:
  label: 'wine glass'
  model_class_id: 40 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_041:
  label: 'cup'
  model_class_id: 41 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_042:
  label: 'fork'
  model_class_id: 42 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_043:
  label: 'knife'
  model_class_id: 43 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_044:
  label: 'spoon'
  model_class_id: 44 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_045:
  label: 'bowl'
  model_class_id: 45 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_046:
  label: 'banana'
  model_class_id: 46 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_047:
  label: 'apple'
  model_class_id: 47 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_048:
  label: 'sandwich'
  model_class_id: 48 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_049:
  label: 'orange'
  model_class_id: 49 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_050:
  label: 'broccoli'
  model_class_id: 50 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_051:
  label: 'carrot'
  model_class_id: 51 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_052:
  label: 'hot dog'
  model_class_id: 52 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH'
  max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2.

class_053:
  label: 'pizza'
  model_class_id: 53 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier)
  enabled: true # Enable/disable the detection of this class
  confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99]
  is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking
  is_static: false # Provide hypothesis about the object staticity to improve the object tracking
  tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time
  tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object
  max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size
  min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size
  max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size
  min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size
  max_box_width_meters: -1.0 # Maximum allowed 3D width
  min_box_width_meters: -1.0 # Minimum allowed 3D width
  max_box_height_meters: -1.0 # Maximum allowed 3D height
  min_box_height_meters: -1.0 # Minimum allowed 3D height
  object_acceleration_preset: 'DEFAULT' # Object acceleration preset.
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_054: + label: 'donut' + model_class_id: 54 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_055: + label: 'cake' + model_class_id: 55 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_056: + label: 'chair' + model_class_id: 56 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_057: + label: 'couch' + model_class_id: 57 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_058: + label: 'potted plant' + model_class_id: 58 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_059: + label: 'bed' + model_class_id: 59 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_060: + label: 'dining table' + model_class_id: 60 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_061: + label: 'toilet' + model_class_id: 61 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_062: + label: 'tv' + model_class_id: 62 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_063: + label: 'laptop' + model_class_id: 63 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_064: + label: 'mouse' + model_class_id: 64 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_065: + label: 'remote' + model_class_id: 65 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_066: + label: 'keyboard' + model_class_id: 66 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_067: + label: 'cell phone' + model_class_id: 67 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_068: + label: 'microwave' + model_class_id: 68 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_069: + label: 'oven' + model_class_id: 69 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_070: + label: 'toaster' + model_class_id: 70 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_071: + label: 'sink' + model_class_id: 71 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_072: + label: 'refrigerator' + model_class_id: 72 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_073: + label: 'book' + model_class_id: 73 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_074: + label: 'clock' + model_class_id: 74 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_075: + label: 'vase' + model_class_id: 75 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_076: + label: 'scissors' + model_class_id: 76 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_077: + label: 'teddy bear' + model_class_id: 77 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_078: + label: 'hair drier' + model_class_id: 78 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + + class_079: + label: 'toothbrush' + model_class_id: 79 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/object_detection.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/object_detection.yaml new file mode 100644 index 000000000..42a39a7e4 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/object_detection.yaml @@ -0,0 +1,26 @@ +/**: + ros__parameters: + object_detection: + class: + people: + enabled: true # [DYNAMIC] - Enable/disable the detection of persons + confidence_threshold: 65.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + vehicle: + enabled: true # [DYNAMIC] - Enable/disable the detection of vehicles + confidence_threshold: 60.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + bag: + enabled: true # [DYNAMIC] - Enable/disable the detection of bags + confidence_threshold: 40.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + animal: + enabled: true # [DYNAMIC] - Enable/disable the detection of animals + confidence_threshold: 40.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + electronics: + enabled: true # [DYNAMIC] - Enable/disable the detection of electronic devices + confidence_threshold: 45.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + fruit_vegetable: + enabled: true # [DYNAMIC] - Enable/disable the detection of fruits and vegetables + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + sport: + enabled: true # [DYNAMIC] - Enable/disable the detection of sport-related objects + confidence_threshold: 30.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + diff --git 
a/src/lib/zed-ros2-wrapper/zed_debug/config/virtual.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/virtual.yaml new file mode 100644 index 000000000..52ecec8b1 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/virtual.yaml @@ -0,0 +1,27 @@ +# config/zedx.yaml +# Parameters for Stereolabs ZED X camera +--- +/**: + ros__parameters: + general: + camera_model: 'virtual' + camera_name: 'virtual' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. '4K', 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. 
Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 1000.0 diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zed.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zed.yaml new file mode 100644 index 000000000..a466dd24e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zed.yaml @@ -0,0 +1,19 @@ +# config/zed_yaml +# Parameters for Stereolabs ZED camera +--- +/**: + ros__parameters: + general: + camera_model: 'zed' + camera_name: 'zed' # overwritten by launch file + grab_resolution: 'HD720' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zed2.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zed2.yaml new file mode 100644 index 000000000..2d39fce48 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zed2.yaml @@ -0,0 +1,20 @@ +# config/zed2_yaml +# Parameters for Stereolabs ZED2 camera +--- +/**: + ros__parameters: + general: + camera_model: 'zed2' + camera_name: 'zed2' # overwritten by launch file + grab_resolution: 'HD720' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. 
Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zed2i.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zed2i.yaml new file mode 100644 index 000000000..9e27b0f00 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zed2i.yaml @@ -0,0 +1,21 @@ +# config/zed2i_yaml +# Parameters for Stereolabs zed2i camera + +--- +/**: + ros__parameters: + general: + camera_model: 'zed2i' + camera_name: 'zed2i' # overwritten by launch file + grab_resolution: 'HD1080' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 15 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedm.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedm.yaml new file mode 100644 index 000000000..66319a997 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedm.yaml @@ -0,0 +1,20 @@ +# config/zedm_yaml +# Parameters for Stereolabs ZED mini camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedm' + camera_name: 'zedm' # overwritten by launch file + grab_resolution: 'HD1080' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. 
Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 15.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedx.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedx.yaml new file mode 100644 index 000000000..897b76af5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedx.yaml @@ -0,0 +1,28 @@ +# config/zedx.yaml +# Parameters for Stereolabs ZED X camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedx' + camera_name: 'zedx' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. 
Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdr.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdr.yaml new file mode 100644 index 000000000..3cf417568 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdr.yaml @@ -0,0 +1,28 @@ +# config/zedxhdr.yaml +# Parameters for Stereolabs ZED X HDR camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdr' + camera_name: 'zedxhdr' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. 
Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmax.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmax.yaml new file mode 100644 index 000000000..85df02e0e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmax.yaml @@ -0,0 +1,28 @@ +# config/zedxhdrmax.yaml +# Parameters for Stereolabs ZED X HDR MAX camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdrmax' + camera_name: 'zedxhdrmax' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. 
Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 20.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmini.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmini.yaml new file mode 100644 index 000000000..39200c2d0 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxhdrmini.yaml @@ -0,0 +1,28 @@ +# config/zedxhdrmini.yaml +# Parameters for Stereolabs ZED X HDR MINI camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdrmini' + camera_name: 'zedxhdrmini' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. 
Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxm.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxm.yaml new file mode 100644 index 000000000..833b5dffa --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxm.yaml @@ -0,0 +1,28 @@ +# config/zedxm_yaml +# Parameters for Stereolabs ZED X Mini camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxm' + camera_name: 'zedxm' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 
'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16666 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 16666 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 8000 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 128 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. 
Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxone4k.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxone4k.yaml new file mode 100644 index 000000000..a8efcaa88 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxone4k.yaml @@ -0,0 +1,14 @@ +# config/zedxone4k.yaml +# Parameters for Stereolabs ZED X One 4K camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxone4k' + camera_name: 'zedxone4k' # overwritten by launch file + grab_resolution: 'QHDPLUS' # The native camera grab resolution. 'HD4K', 'HD1200', 'QHDPLUS, 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD4K/QHDPLUS: 15 - HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + enable_hdr: false # When set to true, the camera will be set in HDR mode if the camera model and resolution allows it + diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonegs.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonegs.yaml new file mode 100644 index 000000000..f4d08b002 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonegs.yaml @@ -0,0 +1,10 @@ +# config/zedxonegs.yaml +# Parameters for Stereolabs ZED X One GS camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxonegs' + camera_name: 'zedxonegs' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 
'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonehdr.yaml b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonehdr.yaml new file mode 100644 index 000000000..e9f756744 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/config/zedxonehdr.yaml @@ -0,0 +1,10 @@ +# config/zedxonehdr.yaml +# Parameters for Stereolabs ZED X One HDR camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxonehdr' + camera_name: 'zedxonehdr' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/zed_debug/launch/zed_camera_debug.launch.py b/src/lib/zed-ros2-wrapper/zed_debug/launch/zed_camera_debug.launch.py new file mode 100644 index 000000000..c6d5e5c8d --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/launch/zed_camera_debug.launch.py @@ -0,0 +1,436 @@ +# Copyright 2025 Stereolabs +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +from ament_index_python.packages import get_package_share_directory + +from launch import LaunchDescription +from launch.actions import ( + DeclareLaunchArgument, + OpaqueFunction, + LogInfo +) +from launch.conditions import IfCondition +from launch.substitutions import ( + LaunchConfiguration, + Command, + TextSubstitution +) +from launch_ros.actions import ( + Node +) + +# Enable colored output +os.environ["RCUTILS_COLORIZED_OUTPUT"] = "1" + +# ZED Configurations to be loaded by ZED Node +default_config_common = os.path.join( + get_package_share_directory('zed_debug'), + 'config', + 'common' +) + +# Object Detection Configuration to be loaded by ZED Node +default_object_detection_config_path = os.path.join( + get_package_share_directory('zed_debug'), + 'config', + 'object_detection.yaml' +) +# Custom Object Detection Configuration to be loaded by ZED Node +default_custom_object_detection_config_path = os.path.join( + get_package_share_directory('zed_debug'), + 'config', + 'custom_object_detection.yaml' +) + +# URDF/xacro file to be loaded by the Robot State Publisher node +default_xacro_path = os.path.join( + get_package_share_directory('zed_description'), + 'urdf', + 'zed_descr.urdf.xacro' +) + +# Function to parse array-like launch arguments +def parse_array_param(param): + cleaned = param.replace('[', '').replace(']', '').replace(' ', '') + if not cleaned: + return [] + return cleaned.split(',') + +def launch_setup(context, *args, **kwargs): + return_array = [] + + # Launch configuration variables + node_log_type = LaunchConfiguration('node_log_type') + + svo_path = LaunchConfiguration('svo_path') + publish_svo_clock = LaunchConfiguration('publish_svo_clock') + + use_sim_time = LaunchConfiguration('use_sim_time') + sim_mode = LaunchConfiguration('sim_mode') + sim_address = LaunchConfiguration('sim_address') + sim_port = LaunchConfiguration('sim_port') + + stream_address = LaunchConfiguration('stream_address') + stream_port = 
LaunchConfiguration('stream_port') + + namespace = LaunchConfiguration('namespace') + camera_name = LaunchConfiguration('camera_name') + camera_model = LaunchConfiguration('camera_model') + + node_name = LaunchConfiguration('node_name') + + ros_params_override_path = LaunchConfiguration('ros_params_override_path') + object_detection_config_path = LaunchConfiguration('object_detection_config_path') + custom_object_detection_config_path = LaunchConfiguration('custom_object_detection_config_path') + + serial_number = LaunchConfiguration('serial_number') + camera_id = LaunchConfiguration('camera_id') + + serial_numbers = LaunchConfiguration('serial_numbers') + camera_ids = LaunchConfiguration('camera_ids') + + publish_urdf = LaunchConfiguration('publish_urdf') + publish_tf = LaunchConfiguration('publish_tf') + publish_map_tf = LaunchConfiguration('publish_map_tf') + publish_imu_tf = LaunchConfiguration('publish_imu_tf') + xacro_path = LaunchConfiguration('xacro_path') + + enable_gnss = LaunchConfiguration('enable_gnss') + gnss_antenna_offset = LaunchConfiguration('gnss_antenna_offset') + + cmd_prefix = LaunchConfiguration('cmd_prefix') + + if( cmd_prefix.perform(context) == ''): + prefix_string = '' + else: + #prefix_string = 'xterm -geometry 250x40 -e ' + cmd_prefix.perform(context) + prefix_string = cmd_prefix.perform(context) + info = 'Using command prefix: `' + prefix_string + '`' + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + node_log_type_val = node_log_type.perform(context) + namespace_val = namespace.perform(context) + camera_name_val = camera_name.perform(context) + camera_model_val = camera_model.perform(context) + node_name_val = node_name.perform(context) + enable_gnss_val = enable_gnss.perform(context) + gnss_coords = parse_array_param(gnss_antenna_offset.perform(context)) + serial_numbers_val = serial_numbers.perform(context) + camera_ids_val = camera_ids.perform(context) + + if(node_log_type_val == 'both'): + node_log_effective = 
'both' + else: # 'screen' or 'log' + node_log_effective = { + 'stdout': node_log_type_val, + 'stderr': node_log_type_val + } + + if (camera_name_val == ''): + camera_name_val = 'zed' + + if (camera_model_val == 'virtual'): + # Virtual Stereo Camera setup + serials = parse_array_param(serial_numbers_val) + ids = parse_array_param(camera_ids_val) + + # If not in live mode, at least one of serials or ids must be a valid 2-values array + if(len(serials) != 2 and len(ids) != 2 and svo_path.perform(context) == 'live'): + return [ + LogInfo(msg=TextSubstitution( + text='With a Virtual Stereo Camera setup, one of `serial_numbers` or `camera_ids` launch arguments must contain two valid values (Left and Right camera identification).')) + ] + + if(namespace_val == ''): + namespace_val = camera_name_val + '_debug' + else: + node_name_val = camera_name_val + '_debug' + + # Common configuration file + if (camera_model_val == 'zed' or + camera_model_val == 'zedm' or + camera_model_val == 'zed2' or + camera_model_val == 'zed2i' or + camera_model_val == 'zedx' or + camera_model_val == 'zedxm' or + camera_model_val == 'zedxhdr' or + camera_model_val == 'zedxhdrmini' or + camera_model_val == 'zedxhdrmax' or + camera_model_val == 'virtual'): + config_common_path_val = default_config_common + '_stereo.yaml' + else: + config_common_path_val = default_config_common + '_mono.yaml' + + info = 'Using common configuration file: ' + config_common_path_val + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Camera configuration file + config_camera_path = os.path.join( + get_package_share_directory('zed_debug'), + 'config', + camera_model_val + '.yaml' + ) + + info = 'Using camera configuration file: ' + config_camera_path + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Object Detection configuration file + info = 'Using Object Detection configuration file: ' + object_detection_config_path.perform(context) + 
return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Custom Object Detection configuration file + info = 'Using Custom Object Detection configuration file: ' + custom_object_detection_config_path.perform(context) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # ROS parameters override file + ros_params_override_path_val = ros_params_override_path.perform(context) + if(ros_params_override_path_val != ''): + info = 'Using ROS parameters override file: ' + ros_params_override_path_val + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Xacro command with options + xacro_command = [] + xacro_command.append('xacro') + xacro_command.append(' ') + xacro_command.append(xacro_path.perform(context)) + xacro_command.append(' ') + xacro_command.append('camera_name:=') + xacro_command.append(camera_name_val) + xacro_command.append(' ') + xacro_command.append('camera_model:=') + xacro_command.append(camera_model_val) + xacro_command.append(' ') + if(enable_gnss_val=='true'): + xacro_command.append(' ') + xacro_command.append('enable_gnss:=true') + xacro_command.append(' ') + if(len(gnss_coords)==3): + xacro_command.append('gnss_x:=') + xacro_command.append(gnss_coords[0]) + xacro_command.append(' ') + xacro_command.append('gnss_y:=') + xacro_command.append(gnss_coords[1]) + xacro_command.append(' ') + xacro_command.append('gnss_z:=') + xacro_command.append(gnss_coords[2]) + xacro_command.append(' ') + + # Robot State Publisher node + rsp_name = camera_name_val + '_state_publisher' + rsp_node = Node( + condition=IfCondition(publish_urdf), + package='robot_state_publisher', + namespace=namespace_val, + executable='robot_state_publisher', + name=rsp_name, + output=node_log_effective, + parameters=[{ + 'use_sim_time': publish_svo_clock, + 'robot_description': Command(xacro_command) + }], + remappings=[('robot_description', camera_name_val+'_description')] + ) + return_array.append(rsp_node) + + # ZED Node parameters + 
node_parameters = [] + + # Add YAML files + if(config_common_path_val != ''): + node_parameters.append(config_common_path_val) + if(config_camera_path != ''): + node_parameters.append(config_camera_path) + if(object_detection_config_path != ''): + node_parameters.append(object_detection_config_path.perform(context)) + if(custom_object_detection_config_path != ''): + node_parameters.append(custom_object_detection_config_path.perform(context)) + if( ros_params_override_path_val != ''): + node_parameters.append(ros_params_override_path.perform(context)) + + # Add launch arguments overrides + node_parameters.append( + # Launch arguments must override the YAML files values + { + 'use_sim_time': use_sim_time, + 'simulation.sim_enabled': sim_mode, + 'simulation.sim_address': sim_address, + 'simulation.sim_port': sim_port, + 'stream.stream_address': stream_address, + 'stream.stream_port': stream_port, + 'general.camera_name': camera_name_val, + 'general.camera_model': camera_model_val, + 'svo.svo_path': svo_path, + 'svo.publish_svo_clock': publish_svo_clock, + 'general.serial_number': serial_number, + 'general.camera_id': camera_id, + 'pos_tracking.publish_tf': publish_tf, + 'pos_tracking.publish_map_tf': publish_map_tf, + 'sensors.publish_imu_tf': publish_imu_tf, + 'gnss_fusion.gnss_fusion_enabled': enable_gnss, + 'general.virtual_serial_numbers': serial_numbers_val, + 'general.virtual_camera_ids': camera_ids_val + } + ) + + # Select what camera component to load in the Executor at Runtime + exe_args = [] + if( camera_model_val == 'zedxonegs' or + camera_model_val == 'zedxone4k' or + camera_model_val == 'zedxonehdr' ): + exe_args.append('--monocular') + + # ZED Wrapper node with hardcoded container + zed_node = Node( + executable='zed_debug_proc', + package='zed_debug', + name=node_name_val, + namespace=namespace_val, + parameters=node_parameters, + output='screen', + prefix=[prefix_string], + arguments=exe_args + ['--ros-args', '--log-level', 'debug'] + ) + 
return_array.append(zed_node) + + return return_array + +def generate_launch_description(): + return LaunchDescription( + [ + # Declare launch arguments + DeclareLaunchArgument( + 'node_log_type', + default_value=TextSubstitution(text='both'), + description='The log type of the node. It can be `screen`, `log` or `both`. The `log` type will save the log in a file in the `~/.ros/log/` folder. The `screen` type will print the log on the terminal. The `both` type will do both.', + choices=['screen', 'log', 'both']), + DeclareLaunchArgument( + 'camera_name', + default_value=TextSubstitution(text='zed'), + description='The name of the camera. It can be different from the camera model and it will be used as node `namespace`.'), + DeclareLaunchArgument( + 'camera_model', + description='[REQUIRED] The model of the camera. Using a wrong camera model can disable camera features.', + choices=['zed', 'zedm', 'zed2', 'zed2i', 'zedx', 'zedxm', 'zedxhdr', 'zedxhdrmini', 'zedxhdrmax', 'virtual', 'zedxonegs', 'zedxone4k', 'zedxonehdr']), + DeclareLaunchArgument( + 'namespace', + default_value='', + description='The namespace of the node. If empty (default) the camera name is used.'), + DeclareLaunchArgument( + 'node_name', + default_value='zed_debug_node', + description='The name of the zed_debug node. All the topic will have the same prefix: `///`. 
If a namespace is specified, the node name is replaced by the camera name.'), + DeclareLaunchArgument( + 'ros_params_override_path', + default_value='', + description='The path to an additional parameters file to override the default values.'), + DeclareLaunchArgument( + 'object_detection_config_path', + default_value=TextSubstitution(text=default_object_detection_config_path), + description='Path to the YAML configuration file for the Object Detection parameters.'), + DeclareLaunchArgument( + 'custom_object_detection_config_path', + default_value=TextSubstitution(text=default_custom_object_detection_config_path), + description='Path to the YAML configuration file for the Custom Object Detection parameters.'), + DeclareLaunchArgument( + 'serial_number', + default_value='0', + description='The serial number of the camera to be opened. It is mandatory to use this parameter or camera ID in multi-camera rigs to distinguish between different cameras. Use `ZED_Explorer -a` to retrieve the serial number of all the connected cameras.'), + DeclareLaunchArgument( + 'serial_numbers', + default_value='[]', + description='The serial numbers of the two cameras to be opened to compose a Virtual Stereo Camera, [left_sn,right_sn]. Use `ZED_Explorer -a` to retrieve the serial number of all the connected cameras.'), + DeclareLaunchArgument( + 'camera_id', + default_value='-1', + description='The ID of the camera to be opened. It is mandatory to use this parameter or serial number in multi-camera rigs to distinguish between different cameras. Use `ZED_Explorer -a` to retrieve the ID of all the connected cameras.'), + DeclareLaunchArgument( + 'camera_ids', + default_value='[]', + description='The IDs of the two cameras to be opened to compose a Virtual Stereo Camera, [left_id,right_id]. 
Use `ZED_Explorer -a` to retrieve the ID of all the connected cameras.'), + DeclareLaunchArgument( + 'publish_urdf', + default_value='true', + description='Enable URDF processing and starts Robot State Published to propagate static TF.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_tf', + default_value='true', + description='Enable publication of the `odom -> camera_link` TF.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_map_tf', + default_value='true', + description='Enable publication of the `map -> odom` TF. Note: Ignored if `publish_tf` is False.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_imu_tf', + default_value='false', + description='Enable publication of the IMU TF. Note: Ignored if `publish_tf` is False.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'xacro_path', + default_value=TextSubstitution(text=default_xacro_path), + description='Path to the camera URDF file as a xacro file.'), + DeclareLaunchArgument( + 'svo_path', + default_value=TextSubstitution(text='live'), + description='Path to an input SVO file.'), + DeclareLaunchArgument( + 'publish_svo_clock', + default_value='false', + description='If set to `true` the node will act as a clock server publishing the SVO timestamp. This is useful for node synchronization'), + DeclareLaunchArgument( + 'enable_gnss', + default_value='false', + description='Enable GNSS fusion to fix positional tracking pose with GNSS data from messages of type `sensor_msgs::msg::NavSatFix`. The fix topic can be customized in `common_stereo.yaml`.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'gnss_antenna_offset', + default_value='[]', + description='Position of the GNSS antenna with respect to the mounting point of the ZED camera. 
Format: [x,y,z]'), + DeclareLaunchArgument( + 'use_sim_time', + default_value='false', + description='If set to `true` the node will wait for messages on the `/clock` topic to start and will use this information as the timestamp reference', + choices=['true', 'false']), + DeclareLaunchArgument( + 'sim_mode', + default_value='false', + description='Enable simulation mode. Set `sim_address` and `sim_port` to configure the simulator input.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'sim_address', + default_value='127.0.0.1', + description='The connection address of the simulation server. See the documentation of the supported simulation plugins for more information.'), + DeclareLaunchArgument( + 'sim_port', + default_value='30000', + description='The connection port of the simulation server. See the documentation of the supported simulation plugins for more information.'), + DeclareLaunchArgument( + 'stream_address', + default_value='', + description='The connection address of the input streaming server.'), + DeclareLaunchArgument( + 'stream_port', + default_value='30000', + description='The connection port of the input streaming server.'), + DeclareLaunchArgument( + 'cmd_prefix', + default_value='', + description='A prefix to be added to the node executable, to run debugging tools like `valgrind` or `gdb`. 
For example: `valgrind --leak-check=full` or `gdb --args`.'), + OpaqueFunction(function=launch_setup) + ] + ) diff --git a/src/lib/zed-ros2-wrapper/zed_debug/package.xml b/src/lib/zed-ros2-wrapper/zed_debug/package.xml new file mode 100644 index 000000000..333b5cfdb --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/package.xml @@ -0,0 +1,34 @@ + + + + zed_debug + 5.2.2 + Package to debug ZED Components by loading them in a single C++ process + STEREOLABS + Apache License 2.0 + https://www.stereolabs.com/ + https://github.com/stereolabs/zed-ros2-wrapper + https://github.com/stereolabs/zed-ros2-wrapper/issues + + ament_cmake + + ament_cmake_auto + + rclcpp + rclcpp_components + rcutils + zed_components + backward_ros + + ament_lint_auto + ament_cmake_copyright + ament_cmake_cppcheck + ament_cmake_lint_cmake + ament_cmake_pep257 + ament_cmake_uncrustify + ament_cmake_xmllint + + + ament_cmake + + \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/zed_debug/src/zed_debug_proc.cpp b/src/lib/zed-ros2-wrapper/zed_debug/src/zed_debug_proc.cpp new file mode 100644 index 000000000..38b660ee5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_debug/src/zed_debug_proc.cpp @@ -0,0 +1,64 @@ +// Copyright 2026 Stereolabs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include + +int main(int argc, char ** argv) +{ + // Disable stdout buffering for better logging visibility + setvbuf(stdout, NULL, _IONBF, BUFSIZ); + + // Initialize ROS 2 + rclcpp::init(argc, argv); + + // Disable intra-process communication + rclcpp::NodeOptions options; + options.use_intra_process_comms(false); + + // Check for monocular mode argument + bool monocular_mode = false; + for (int i = 1; i < argc; ++i) { + std::string mode_arg = argv[i]; + if (mode_arg == "--monocular") { + monocular_mode = true; + break; + } + } + + // Create the appropriate ZED camera node (monocular or stereo) + rclcpp::Node::SharedPtr zed_component; + if (monocular_mode) { + RCLCPP_INFO( + rclcpp::get_logger("zed_debug_proc"), + "Debugging ZED Camera One (monocular) node..."); + zed_component = std::make_shared(options); + } else { + RCLCPP_INFO( + rclcpp::get_logger("zed_debug_proc"), + "Debugging ZED Camera (stereo) node..."); + zed_component = std::make_shared(options); + } + + // Create single-threaded executor and spin the node + rclcpp::executors::SingleThreadedExecutor executor; + executor.add_node(zed_component); + executor.spin(); + + // Shutdown ROS 2 + rclcpp::shutdown(); + + return EXIT_SUCCESS; +} diff --git a/src/lib/zed-ros2-wrapper/zed_ros2/CMakeLists.txt b/src/lib/zed-ros2-wrapper/zed_ros2/CMakeLists.txt new file mode 100644 index 000000000..28b6ca7f3 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_ros2/CMakeLists.txt @@ -0,0 +1,67 @@ +cmake_minimum_required(VERSION 3.8) +project(zed_ros2 NONE) + +## Generate symbols for IDE indexer +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +################################################ +# Check the ROS2 version + +set(ROS2_FOUND FALSE) +if(DEFINED ENV{ROS_DISTRO}) + set(FOUND_ROS2_DISTRO $ENV{ROS_DISTRO}) + set(ROS2_FOUND TRUE) + #message("* Found ROS2 ${FOUND_ROS2_DISTRO}") +else() + message("* ROS2 distro variable not set. 
Trying to figure it out...") + set(ROS2_DISTROS "ardent;bouncy;crystal;dashing;eloquent;foxy;galactic;humble;iron;jazzy;kilted;rolling") + set(ROS2_FOUND FALSE) + foreach(distro ${ROS2_DISTROS}) + if(NOT ROS2_FOUND) + find_path(RCLCPP_H rclcpp.hpp PATHS /opt/ros/${distro}/include/rclcpp) + if(RCLCPP_H) + #message("* Found ROS2 ${distro}") + set(FOUND_ROS2_DISTRO ${distro}) + set(ROS2_FOUND TRUE) + endif() + endif() + endforeach() +endif() + +if(ROS2_FOUND) + if(${FOUND_ROS2_DISTRO} STREQUAL "foxy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_FOXY) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "humble") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_HUMBLE) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "iron") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_IRON) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "jazzy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_JAZZY) + else() + message("*** WARNING *** Unsupported ROS2 ${FOUND_ROS2_DISTRO}. '${PROJECT_NAME}' may not work correctly.") + endif() +else() + message("*** WARNING *** ROS2 distro is unknown. 
This package could not work correctly.") +endif() +################################################ + +find_package(ament_cmake_auto REQUIRED) +ament_auto_find_build_dependencies() + +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + ament_lint_auto_find_test_dependencies() +endif() + +############################################################################### +# Add all files in subdirectories of the project in +# a dummy_target so qtcreator have access to all files +file(GLOB_RECURSE all_files ${CMAKE_CURRENT_SOURCE_DIR}/*) +add_custom_target(all_${PROJECT_NAME}_files SOURCES ${all_files}) +############################################################################### + +ament_package() diff --git a/src/lib/zed-ros2-wrapper/zed_ros2/package.xml b/src/lib/zed-ros2-wrapper/zed_ros2/package.xml new file mode 100644 index 000000000..04751ae6a --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_ros2/package.xml @@ -0,0 +1,33 @@ + + + + zed_ros2 + 5.2.2 + Stereolabs zed-ros2-wrapper support meta package + STEREOLABS + Apache License 2.0 + http://stereolabs.com/ + https://github.com/stereolabs/zed-ros2-wrapper + https://github.com/stereolabs/zed-ros2-wrapper/issues + + ament_cmake + + ament_cmake_auto + + zed_msgs + zed_description + zed_components + zed_wrapper + + ament_lint_auto + ament_cmake_copyright + ament_cmake_cppcheck + ament_cmake_lint_cmake + ament_cmake_pep257 + ament_cmake_uncrustify + ament_cmake_xmllint + + + ament_cmake + + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/CMakeLists.txt b/src/lib/zed-ros2-wrapper/zed_wrapper/CMakeLists.txt new file mode 100644 index 000000000..6407536d6 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/CMakeLists.txt @@ -0,0 +1,78 @@ +cmake_minimum_required(VERSION 3.8) +project(zed_wrapper) + +## Generate symbols for IDE indexer +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +################################################ +# Check the ROS2 version + +set(ROS2_FOUND FALSE) +if(DEFINED ENV{ROS_DISTRO}) + 
set(FOUND_ROS2_DISTRO $ENV{ROS_DISTRO}) + set(ROS2_FOUND TRUE) + #message("* Found ROS2 ${FOUND_ROS2_DISTRO}") +else() + message("* ROS2 distro variable not set. Trying to figure it out...") + set(ROS2_DISTROS "ardent;bouncy;crystal;dashing;eloquent;foxy;galactic;humble;iron;jazzy;kilted;rolling") + set(ROS2_FOUND FALSE) + foreach(distro ${ROS2_DISTROS}) + if(NOT ROS2_FOUND) + find_path(RCLCPP_H rclcpp.hpp PATHS /opt/ros/${distro}/include/rclcpp) + if(RCLCPP_H) + #message("* Found ROS2 ${distro}") + set(FOUND_ROS2_DISTRO ${distro}) + set(ROS2_FOUND TRUE) + endif() + endif() + endforeach() +endif() + +if(ROS2_FOUND) + if(${FOUND_ROS2_DISTRO} STREQUAL "foxy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_FOXY) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "iron") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_IRON) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "humble") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_HUMBLE) + elseif(${FOUND_ROS2_DISTRO} STREQUAL "jazzy") + #message("* ROS2 ${FOUND_ROS2_DISTRO} is officially supported by this package.") + add_definitions(-DFOUND_JAZZY) + else() + message("*** WARNING *** Unsupported ROS2 ${FOUND_ROS2_DISTRO}. '${PROJECT_NAME}' may not work correctly.") + endif() +else() + message("*** WARNING *** ROS2 distro is unknown. 
This package could not work correctly.") +endif() +################################################ + +############################################# +# Dependencies +find_package(ament_cmake_auto REQUIRED) +ament_auto_find_build_dependencies() + +############################################# +# Install + +# Install PARAMS files +install(DIRECTORY + config + DESTINATION share/${PROJECT_NAME} +) + +# Install URDF files +install(DIRECTORY + urdf + DESTINATION share/${PROJECT_NAME} +) + +# Install LAUNCH files +install(DIRECTORY + launch + DESTINATION share/${PROJECT_NAME} +) + +ament_package() diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_mono.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_mono.yaml new file mode 100644 index 000000000..fd905f9ea --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_mono.yaml @@ -0,0 +1,84 @@ +# config/common_mono.yaml +# Common parameters to Stereolabs ZED Stereo cameras + +--- +/**: + ros__parameters: + general: + serial_number: 0 # overwritten by launch file + pub_resolution: "CUSTOM" # The resolution used for output. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factory to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + gpu_id: -1 + optional_opencv_calibration_file: "" # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + publish_status: true # Enable/Disable the publishing of the camera status topic + + svo: + decryption_key: "" # Optional decryption key/passphrase when opening encrypted SVO files + use_svo_timestamps: true # Use the SVO timestamps to publish data. 
If false, data will be published at the system time. + publish_svo_clock: true # [overwritten by launch file options] When use_svo_timestamps is true allows to publish the SVO clock to the `/clock` topic. This is useful for synchronous rosbag playback. + svo_loop: false # Enable loop mode when using an SVO as input source. NOTE: ignored if SVO timestamping is used + svo_realtime: true # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + + video: + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + auto_exposure: true # [DYNAMIC] - Enables or disables auto exposure control. false: manual, true: auto + exposure_time: 16000 # [DYNAMIC] - Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # [DYNAMIC] - Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # [DYNAMIC] - Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # [DYNAMIC] - Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + auto_analog_gain: true # [DYNAMIC] - Enables or disables auto gain control. false: manual, true: auto + analog_gain: 1255 # [DYNAMIC] - Defines the real analog gain (sensor) in mDB. Range [1000-16000]. 
Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # [DYNAMIC] - Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # [DYNAMIC] - Defines the maximum range of sensor gain in automatic control + auto_digital_gain: false # [DYNAMIC] - Enables or disables auto digital gain control. false: manual, true: auto + digital_gain: 1 # [DYNAMIC] - Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # [DYNAMIC] - Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # [DYNAMIC] - Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # [DYNAMIC] - Defines the level of denoising applied on both left and right images. Range [0-100] + enable_24bit_output: false # [SDK >= 5.1] Enable BGR 24-bit output for lower bandwidth usage. If false, BGRA 32-bit output is used (old way before SDK v5.1) + publish_rgb: true # Advertise the RGB image topics that are published only if a node subscribes to them + publish_gray: false # Advertise the gray image topics that are published only if a node subscribes to them + publish_raw: false # Advertise the raw image topics that are published only if a node subscribes to them (only if 'publish_rgb' or 'publish_gray' is true) + + sensors: + publish_imu_tf: true # [overwritten by launch file options] enable/disable the IMU TF broadcasting + sensors_pub_rate: 100. # [DYNAMIC] - frequency of publishing of sensors data. MAX: 400. 
- MIN: grab rate + publish_imu: true # Advertise the IMU topic that is published only if a node subscribes to it + publish_imu_raw: false # Advertise the raw IMU topic that is published only if a node subscribes to it + publish_cam_imu_transf: false # Advertise the CAMERA-IMU transformation topic that is published only if a node subscribes to it + publish_temp: false # Advertise the temperature topics that are published only if a node subscribes to them + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: 'H265' # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. 
+ + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + thread_sched_policy: "SCHED_BATCH" # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: '' # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. + use_pub_timestamps: false # Use the current ROS time for the message timestamp instead of the camera timestamp. This is useful to test data communication latency. + debug_common: false # Enable/Disable the printing of debug messages related to the common parameters and functionalities + debug_dyn_params: false # Enable/Disable the printing of debug messages related to dynamic parameters updates + debug_video_depth: false # Enable/Disable the printing of debug messages related to video and depth parameters and functionalities + debug_camera_controls: false # Enable/Disable the printing of debug messages related to camera controls (exposure, gain, white balance, etc) + debug_sensors: false # Enable/Disable the printing of debug messages related to sensors data and functionalities + debug_streaming: false # Enable/Disable the printing of debug messages related to the network streaming functionalities + debug_advanced: false # Enable/Disable the printing of debug messages related to advanced thread parameters and functionalities + debug_nitros: false # Enable/Disable the printing of debug messages related to NITROS functionalities + disable_nitros: false # If available, disable NITROS 
usage for debugging and testing purposes diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_stereo.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_stereo.yaml new file mode 100644 index 000000000..151dcfc3c --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/common_stereo.yaml @@ -0,0 +1,220 @@ +# config/common_stereo.yaml +# Common parameters to Stereolabs ZED Stereo cameras + +--- +/**: + ros__parameters: + use_sim_time: false # Set to `true` only if there is a publisher for the simulated clock to the `/clock` topic. Normally used in simulation mode. + + simulation: + sim_enabled: false # Set to `true` to enable the simulation mode and connect to a simulation server + sim_address: '127.0.0.1' # The connection address of the simulation server. See the documentation of the supported simulation plugins for more information. + sim_port: 30000 # The connection port of the simulation server. See the documentation of the supported simulation plugins for more information. + + svo: + decryption_key: "" # Optional decryption key/passphrase when opening encrypted SVO files + use_svo_timestamps: true # Use the SVO timestamps to publish data. If false, data will be published at the system time. + publish_svo_clock: false # [overwritten by launch file options] When use_svo_timestamps is true allows to publish the SVO clock to the `/clock` topic. This is useful for synchronous rosbag playback. + svo_loop: false # Enable loop mode when using an SVO as input source. 
NOTE: ignored if SVO timestamping is used + svo_realtime: false # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + play_from_frame: 0 # Start playing the SVO from a specific frame + replay_rate: 1.0 # Replay rate for the SVO when not used in realtime mode (between [0.10-5.0]) + + general: + camera_timeout_sec: 5 + camera_max_reconnect: 5 + camera_flip: false + self_calib: true # Enable the self-calibration process at camera opening. See https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#affeaa06cfc1d849e311e484ceb8edcc5 + serial_number: 0 # overwritten by launch file + pub_resolution: 'CUSTOM' # The resolution used for image and depth map publishing. 'NATIVE' to use the same `general.grab_resolution` - `CUSTOM` to apply the `general.pub_downscale_factor` downscale factory to reduce bandwidth in transmission + pub_downscale_factor: 2.0 # rescale factor used to rescale image before publishing when 'pub_resolution' is 'CUSTOM' + grab_compute_capping_fps: 0.0 # Define a computation upper limit to the grab frequency. This can be useful to get a known constant fixed rate or limit the computation load while keeping a short exposure time by setting a high camera capture framerate. If set to 0, the grab compute capping will be disabled and the ZED SDK will process data at the grab rate. + pub_frame_rate: 0.0 # [DYNAMIC] Frequency of publishing of visual images and depth data (not the Point Cloud, see 'depth.point_cloud_freq'). This value must be <= the camera framerate. Set to 0 for no limit (= grab_frame_rate). + enable_image_validity_check: 1 # [SDK5 required] Sets the image validity check. If set to 1, the SDK will check if the frames are valid before processing. 
+ gpu_id: -1 + optional_opencv_calibration_file: '' # Optional path where the ZED SDK can find a file containing the calibration information of the camera computed by OpenCV. Read the ZED SDK documentation for more information: https://www.stereolabs.com/docs/api/structsl_1_1InitParameters.html#a9eab2753374ef3baec1d31960859ba19 + async_image_retrieval: false # If set to true will camera image retrieve at a framerate different from \ref grab() application framerate. This is useful for recording SVO or sending camera stream at different rate than application. + publish_status: true # Advertise the status topics that are published only if a node subscribes to them + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + video: + saturation: 4 # [DYNAMIC] + sharpness: 4 # [DYNAMIC] + gamma: 8 # [DYNAMIC] + auto_exposure_gain: true # [DYNAMIC] + exposure: 80 # [DYNAMIC] + gain: 80 # [DYNAMIC] + auto_whitebalance: true # [DYNAMIC] + whitebalance_temperature: 42 # [DYNAMIC] - [28,65] x100 - works only if `auto_whitebalance` is false + enable_24bit_output: false # [SDK >= 5.1] Enable BGR 24-bit output for lower bandwidth usage. 
If false, BGRA 32-bit output is used (old way before SDK v5.1) + publish_rgb: true # Advertise the RGB image topics that are published only if a node subscribes to them + publish_left_right: false # Advertise the left and right image topics that are published only if a node subscribes to them + publish_raw: false # Advertise the raw image topics that are published only if a node subscribes to them + publish_gray: false # Advertise the gray image topics that are published only if a node subscribes to them + publish_stereo: false # Advertise the stereo image topic that is published only if a node subscribes to it + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + sensors: + publish_imu_tf: true # [overwritten by launch file options] enable/disable the IMU TF broadcasting + sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message + sensors_pub_rate: 100. # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate + publish_imu: true # Advertise the IMU topic that is published only if a node subscribes to it + publish_imu_raw: false # Advertise the raw IMU topic that is published only if a node subscribes to it + publish_cam_imu_transf: false # Advertise the CAMERA-IMU transformation topic that is published only if a node subscribes to it + publish_mag: false # Advertise the magnetometer topic that is published only if a node subscribes to it + publish_baro: false # Advertise the barometer topic that is published only if a node subscribes to it + publish_temp: false # Advertise the temperature topics that are published only if a node subscribes to them + + region_of_interest: + automatic_roi: false # Enable the automatic ROI generation to automatically detect part of the robot in the FoV and remove them from the processing. 
Note: if enabled the value of `manual_polygon` is ignored + depth_far_threshold_meters: 2.5 # Filtering how far object in the ROI should be considered, this is useful for a vehicle for instance + image_height_ratio_cutoff: 0.5 # By default consider only the lower half of the image, can be useful to filter out the sky + #manual_polygon: '[]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.33],[0.75,0.33],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.25,0.25],[0.75,0.25],[0.75,0.75],[0.25,0.75]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + #manual_polygon: '[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. + apply_to_depth: true # Apply ROI to depth processing + apply_to_positional_tracking: true # Apply ROI to positional tracking processing + apply_to_object_detection: true # Apply ROI to object detection processing + apply_to_body_tracking: true # Apply ROI to body tracking processing + apply_to_spatial_mapping: true # Apply ROI to spatial mapping processing + publish_roi_mask: false # Advertise the ROI mask image topic that is published only if a node subscribes to it + + depth: + depth_mode: 'NEURAL_LIGHT' # Matches the ZED SDK setting: 'NONE', 'NEURAL_LIGHT', 'NEURAL', 'NEURAL_PLUS' - Note: if 'NONE' all the modules that requires depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...) 
+ depth_stabilization: -1 # Negative value takes SDK default, 0 is disabled - Range: [0,100] + openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters] + point_cloud_freq: 10.0 # [DYNAMIC] Frequency of the pointcloud publishing. This value must be <= the camera framerate. Set to 0 for no limit (= grab_frame_rate). + point_cloud_res: 'COMPACT' # The resolution used for point cloud publishing - 'COMPACT'-Standard resolution. Optimizes processing and bandwidth, 'REDUCED'-Half resolution. Low processing and bandwidth requirements + depth_confidence: 95 # [DYNAMIC] + depth_texture_conf: 100 # [DYNAMIC] + remove_saturated_areas: true # [DYNAMIC] + publish_depth_map: true # Advertise the depth map topics that are published only if a node subscribes to them + publish_depth_info: false # Advertise the depth info topic that is published only if a node subscribes to it + publish_point_cloud: true # Advertise the point cloud topic that is published only if a node subscribes to it + publish_depth_confidence: false # Advertise the depth confidence topic that is published only if a node subscribes to it + publish_disparity: false # Advertise the disparity topic that is published only if a node subscribes to it + # Other parameters are defined, according to the camera model, in the 'zed.yaml', 'zedm.yaml', 'zed2.yaml', 'zed2i.yaml' + # 'zedx.yaml', 'zedxmini.yaml', 'virtual.yaml' files + + pos_tracking: + pos_tracking_enabled: true # True to enable positional tracking from start + pos_tracking_mode: 'AUTO' # 'AUTO' uses the SDK default; also supports 'GEN_1', 'GEN_3' + imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used. 
+ publish_tf: true # [overwritten by launch file options] publish `odom -> camera_link` TF
+ publish_map_tf: true # [overwritten by launch file options] publish `map -> odom` TF
+ map_frame: 'map'
+ odometry_frame: 'odom'
+ area_memory: true # Enable to detect loop closure
+ area_file_path: '' # Path to the area memory file for relocalization and loop closure in a previously explored environment.
+ enable_localization_only: false # If true, the camera will only localize in the loaded area memory without updating the map with new information.
+ save_area_memory_on_closing: false # Save Area memory before closing the camera if `area_file_path` is not empty. You can also use the `save_area_memory` service to save the area memory at any time.
+ reset_odom_with_loop_closure: true # Re-initialize odometry to the last valid pose when loop closure happens (reset camera odometry drift)
+ publish_3d_landmarks: false # Publish 3D landmarks used by the positional tracking algorithm
+ publish_lm_skip_frame: 5 # Publish the landmarks every X frames to reduce bandwidth. Set to 0 to publish all landmarks
+ depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camera from the visual odometry evaluation
+ set_as_static: false # If 'true' the camera will be static and not move in the environment
+ set_gravity_as_origin: true # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose.
+ floor_alignment: false # Enable to automatically calculate camera/floor offset
+ initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `camera_link` frame in the map -> [X, Y, Z, R, P, Y]
+ path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency. Set to 0 for no limit (= grab_frame_rate).
+ path_max_count: -1 # use '-1' for unlimited path size
+ two_d_mode: false # Force navigation on a plane. 
If true the Z value will be fixed to 'fixed_z_value', roll and pitch to zero
+ fixed_z_value: 0.0 # Value to be used for Z coordinate if `two_d_mode` is true
+ transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->camera_link` transform being generated
+ reset_pose_with_svo_loop: true # Reset the camera pose to the `initial_base_pose` when the SVO loop is enabled and the SVO playback reaches the end of the file.
+ publish_odom_pose: true # Advertise the odometry and pose topics that are published only if a node subscribes to them
+ publish_pose_cov: false # Advertise the pose with covariance topic that is published only if a node subscribes to it
+ publish_cam_path: false # Advertise the camera odometry and pose path topics that are published only if a node subscribes to them
+
+ gnss_fusion:
+ gnss_fusion_enabled: false # fuse 'sensor_msgs/NavSatFix' message information into pose data
+ gnss_fix_topic: '/fix' # Name of the GNSS topic of type NavSatFix to subscribe [Default: '/gps/fix']
+ gnss_zero_altitude: false # Set to `true` to ignore GNSS altitude information
+ h_covariance_mul: 1.0 # Multiplier factor to be applied to horizontal covariance of the received fix (plane X/Y)
+ v_covariance_mul: 1.0 # Multiplier factor to be applied to vertical covariance of the received fix (Z axis)
+ publish_utm_tf: true # Publish `utm` -> `map` TF
+ broadcast_utm_transform_as_parent_frame: false # if 'true' publish `utm` -> `map` TF, otherwise `map` -> `utm`
+ enable_reinitialization: false # determines whether reinitialization should be performed between GNSS and VIO fusion when a significant disparity is detected between GNSS data and the current fusion data. It becomes particularly crucial during prolonged GNSS signal loss scenarios.
+ enable_rolling_calibration: true # If this parameter is set to true, the fusion algorithm will use a rough VIO / GNSS calibration at first and then refine it. This allows you to quickly get a fused position. 
+ enable_translation_uncertainty_target: false # When this parameter is enabled (set to true), the calibration process between GNSS and VIO accounts for the uncertainty in the determined translation, thereby facilitating the calibration termination. The maximum allowable uncertainty is controlled by the 'target_translation_uncertainty' parameter.
+ gnss_vio_reinit_threshold: 5.0 # determines the threshold for GNSS/VIO reinitialization. If the fused position deviates out of the region defined by the product of the GNSS covariance and the gnss_vio_reinit_threshold, a reinitialization will be triggered.
+ target_translation_uncertainty: 0.1 # defines the target translation uncertainty at which the calibration process between GNSS and VIO concludes. By default, the threshold is set at 10 centimeters.
+ target_yaw_uncertainty: 0.1 # defines the target yaw uncertainty at which the calibration process between GNSS and VIO concludes. The unit of this parameter is in radian. By default, the threshold is set at 0.1 radians.
+
+ mapping:
+ mapping_enabled: false # True to enable mapping and fused point cloud publication
+ resolution: 0.05 # maps resolution in meters [min: 0.01f - max: 0.2f]
+ max_mapping_range: 5.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0]
+ fused_pointcloud_freq: 1.0 # frequency of the publishing of the fused colored point cloud. Set to 0 for no limit (= point_cloud_freq).
+ clicked_point_topic: '/clicked_point' # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection
+ pd_max_distance_threshold: 0.15 # Plane detection: controls the spread of plane by checking the position difference.
+ pd_normal_similarity_threshold: 15.0 # Plane detection: controls the spread of plane by checking the angle difference. 
+ publish_det_plane: false # Advertise the plane detection topics that is published only if a node subscribes to it + + object_detection: + od_enabled: false # True to enable Object Detection + enable_tracking: true # Whether the object detection system includes object tracking capabilities across a sequence of images. + detection_model: 'MULTI_CLASS_BOX_FAST' # 'MULTI_CLASS_BOX_FAST', 'MULTI_CLASS_BOX_MEDIUM', 'MULTI_CLASS_BOX_ACCURATE', 'PERSON_HEAD_BOX_FAST', 'PERSON_HEAD_BOX_ACCURATE', 'CUSTOM_YOLOLIKE_BOX_OBJECTS' + max_range: 40.0 # [m] Upper depth range for detections.The value cannot be greater than 'depth.max_depth' + filtering_mode: 'NMS3D' # Filtering mode that should be applied to raw detections: 'NONE', 'NMS3D', 'NMS3D_PER_CLASS' + prediction_timeout: 2.0 # During this time [sec], the object will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + # Other parameters are defined in the 'object_detection.yaml' and 'custom_object_detection.yaml' files + + body_tracking: + bt_enabled: false # True to enable Body Tracking + model: 'HUMAN_BODY_MEDIUM' # 'HUMAN_BODY_FAST', 'HUMAN_BODY_MEDIUM', 'HUMAN_BODY_ACCURATE' + body_format: 'BODY_38' # 'BODY_18','BODY_34','BODY_38' + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 15.0 # [m] Defines a upper depth range for detections + body_kp_selection: 'FULL' # 'FULL', 'UPPER_BODY' + enable_body_fitting: false # Defines if the body fitting will be applied + enable_tracking: true # Defines if the object detection will track objects across images flow + prediction_timeout_s: 0.5 # During this time [sec], the skeleton will have OK state even if it is not detected. 
Set this parameter to 0 to disable SDK predictions + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of skeleton key points [0,99] + minimum_keypoints_threshold: 5 # [DYNAMIC] - Minimum number of skeleton key points to be detected for a valid skeleton + + stream_server: + stream_enabled: false # enable the streaming server when the camera is open + codec: 'H265' # different encoding types for image streaming: 'H264', 'H265' + port: 30000 # Port used for streaming. Port must be an even number. Any odd number will be rejected. + bitrate: 12500 # [1000 - 60000] Streaming bitrate (in Kbits/s) used for streaming. See https://www.stereolabs.com/docs/api/structsl_1_1StreamingParameters.html#a873ba9440e3e9786eb1476a3bfa536d0 + gop_size: -1 # [max 256] The GOP size determines the maximum distance between IDR/I-frames. Very high GOP size will result in slightly more efficient compression, especially on static scenes. But latency will increase. + adaptative_bitrate: false # Bitrate will be adjusted depending the number of packet dropped during streaming. If activated, the bitrate can vary between [bitrate/4, bitrate]. + chunk_size: 16084 # [1024 - 65000] Stream buffers are divided into X number of chunks where each chunk is chunk_size bytes long. You can lower chunk_size value if network generates a lot of packet lost: this will generates more chunk for a single image, but each chunk sent will be lighter to avoid inside-chunk corruption. Increasing this value can decrease latency. + target_framerate: 0 # Framerate for the streaming output. This framerate must be below or equal to the camera framerate. Allowed framerates are 15, 30, 60 or 100 if possible. Any other values will be discarded and camera FPS will be taken. 
+ + advanced: # WARNING: do not modify unless you are confident of what you are doing + # Reference documentation: https://man7.org/linux/man-pages/man7/sched.7.html + thread_sched_policy: 'SCHED_BATCH' # 'SCHED_OTHER', 'SCHED_BATCH', 'SCHED_FIFO', 'SCHED_RR' - NOTE: 'SCHED_FIFO' and 'SCHED_RR' require 'sudo' + thread_grab_priority: 50 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_sensor_priority: 70 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + thread_pointcloud_priority: 60 # ONLY with 'SCHED_FIFO' and 'SCHED_RR' - [1 (LOW) z-> 99 (HIGH)] - NOTE: 'sudo' required + + debug: + sdk_verbose: 1 # Set the verbose level of the ZED SDK + sdk_verbose_log_file: '' # Path to the file where the ZED SDK will log its messages. If empty, no file will be created. The log level can be set using the `sdk_verbose` parameter. + use_pub_timestamps: false # Use the current ROS time for the message timestamp instead of the camera timestamp. This is useful to test data communication latency. 
+ debug_common: false # Enable/Disable the printing of debug messages related to the common parameters and functionalities + debug_dyn_params: false # Enable/Disable the printing of debug messages related to dynamic parameters updates + debug_grab: false # Enable/Disable the printing of debug messages related to the grab function and thread + debug_sim: false # Enable/Disable the printing of debug messages related to the simulation mode and connection to the simulation server + debug_video_depth: false # Enable/Disable the printing of debug messages related to video and depth parameters and functionalities + debug_camera_controls: false # Enable/Disable the printing of debug messages related to camera controls (exposure, gain, white balance, etc) + debug_point_cloud: false # Enable/Disable the printing of debug messages related to point cloud generation and publishing + debug_tf: false # Enable/Disable the printing of debug messages related to TF broadcasting + debug_positional_tracking: false # Enable/Disable the printing of debug messages related to positional tracking + debug_gnss: false # Enable/Disable the printing of debug messages related to GNSS functionalities + debug_sensors: false # Enable/Disable the printing of debug messages related to sensor data + debug_mapping: false # Enable/Disable the printing of debug messages related to mapping functionalities + debug_terrain_mapping: false # Enable/Disable the printing of debug messages related to terrain mapping functionalities + debug_object_detection: false # Enable/Disable the printing of debug messages related to object detection functionalities + debug_body_tracking: false # Enable/Disable the printing of debug messages related to body tracking functionalities + debug_roi: false # Enable/Disable the printing of debug messages related to region of interest functionalities + debug_streaming: false # Enable/Disable the printing of debug messages related to the network streaming functionalities + 
debug_advanced: false # Enable/Disable the printing of debug messages related to advanced functionalities + debug_nitros: false # Enable/Disable the printing of debug messages related to NITROS functionalities + disable_nitros: false # If available, disable NITROS usage for debugging and testing purposes diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/custom_object_detection.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/custom_object_detection.yaml new file mode 100644 index 000000000..370ddce59 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/custom_object_detection.yaml @@ -0,0 +1,1943 @@ +# Example of a custom object detection configuration file for the ZED ROS2 wrapper +# This file is used to configure the CUSTOM object detection parameters for the ZED camera. +# The parameters are defined in a YAML format and can be modified to suit the user's needs. +# The parameters are divided into sections for each CUSTOM object class, with each section containing +# specific parameters for that class. + +# This is an example of configuration file for a custom object detection model trained on the COCO dataset. +# COCO 2017 dataset https://cocodataset.org by Microsoft + +# To create a custom ONNX model, you can follow the online documentation: https://www.stereolabs.com/docs/yolo/export + +/**: + ros__parameters: + object_detection: + custom_onnx_file: '' # Path to the YOLO-like ONNX file for custom object detection directly performed by the ZED SDK + custom_onnx_input_size: 512 # Resolution used with the YOLO-like ONNX file. For example, 512 means a input tensor '1x3x512x512' + + custom_class_count: 80 # Number of classes in the custom ONNX file. For example, 80 for YOLOv8 trained on COCO dataset + + # TODO: Add one instance of each class to the list below + # Note: create a class_XXX identifier for each class in the custom ONNX file. + # Note: XXX is a number from 000 to 'custom_class_count-1', and it must be unique for each class. 
+ # Note: the class_XXX identifier is not required to match the class ID [model_class_id] in the custom ONNX file. + + class_000: + label: 'person' + model_class_id: 0 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. 
Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_001: + label: 'bicycle' + model_class_id: 1 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_002: + label: 'car' + model_class_id: 2 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_003: + label: 'motorcycle' + model_class_id: 3 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_004: + label: 'airplane' + model_class_id: 4 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_005: + label: 'bus' + model_class_id: 5 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_006: + label: 'train' + model_class_id: 6 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_007: + label: 'truck' + model_class_id: 7 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_008: + label: 'boat' + model_class_id: 8 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_009: + label: 'traffic light' + model_class_id: 9 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_010: + label: 'fire hydrant' + model_class_id: 10 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_011: + label: 'stop sign' + model_class_id: 11 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_012: + label: 'parking meter' + model_class_id: 12 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_013: + label: 'bench' + model_class_id: 13 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_014: + label: 'bird' + model_class_id: 14 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static objects + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set to a value different from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_015: + label: 'cat' + model_class_id: 15 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_016: + label: 'dog' + model_class_id: 16 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_017: + label: 'horse' + model_class_id: 17 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static objects + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set to a value different from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_018: + label: 'sheep' + model_class_id: 18 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_019: + label: 'cow' + model_class_id: 19 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_020: + label: 'elephant' + model_class_id: 20 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_021: + label: 'bear' + model_class_id: 21 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_022: + label: 'zebra' + model_class_id: 22 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_023: + label: 'giraffe' + model_class_id: 23 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_024: + label: 'backpack' + model_class_id: 24 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_025: + label: 'umbrella' + model_class_id: 25 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_026: + label: 'handbag' + model_class_id: 26 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_027: + label: 'tie' + model_class_id: 27 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_028: + label: 'suitcase' + model_class_id: 28 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_029: + label: 'frisbee' + model_class_id: 29 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_030: + label: 'skis' + model_class_id: 30 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_031: + label: 'snowboard' + model_class_id: 31 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_032: + label: 'sports ball' + model_class_id: 32 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_033: + label: 'kite' + model_class_id: 33 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_034: + label: 'baseball bat' + model_class_id: 34 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_035: + label: 'baseball glove' + model_class_id: 35 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_036: + label: 'skateboard' + model_class_id: 36 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_037: + label: 'surfboard' + model_class_id: 37 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_038: + label: 'tennis racket' + model_class_id: 38 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_039: + label: 'bottle' + model_class_id: 39 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_040: + label: 'wine glass' + model_class_id: 40 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_041: + label: 'cup' + model_class_id: 41 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_042: + label: 'fork' + model_class_id: 42 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_043: + label: 'knife' + model_class_id: 43 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_044: + label: 'spoon' + model_class_id: 44 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_045: + label: 'bowl' + model_class_id: 45 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_046: + label: 'banana' + model_class_id: 46 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_047: + label: 'apple' + model_class_id: 47 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_048: + label: 'sandwich' + model_class_id: 48 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_049: + label: 'orange' + model_class_id: 49 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_050: + label: 'broccoli' + model_class_id: 50 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_051: + label: 'carrot' + model_class_id: 51 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_052: + label: 'hot dog' + model_class_id: 52 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_053: + label: 'pizza' + model_class_id: 53 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_054: + label: 'donut' + model_class_id: 54 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_055: + label: 'cake' + model_class_id: 55 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_056: + label: 'chair' + model_class_id: 56 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_057: + label: 'couch' + model_class_id: 57 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_058: + label: 'potted plant' + model_class_id: 58 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_059: + label: 'bed' + model_class_id: 59 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_060: + label: 'dining table' + model_class_id: 60 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_061: + label: 'toilet' + model_class_id: 61 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_062: + label: 'tv' + model_class_id: 62 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_063: + label: 'laptop' + model_class_id: 63 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_064: + label: 'mouse' + model_class_id: 64 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_065: + label: 'remote' + model_class_id: 65 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_066: + label: 'keyboard' + model_class_id: 66 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_067: + label: 'cell phone' + model_class_id: 67 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_068: + label: 'microwave' + model_class_id: 68 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_069: + label: 'oven' + model_class_id: 69 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_070: + label: 'toaster' + model_class_id: 70 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_071: + label: 'sink' + model_class_id: 71 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_072: + label: 'refrigerator' + model_class_id: 72 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_073: + label: 'book' + model_class_id: 73 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_074: + label: 'clock' + model_class_id: 74 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_075: + label: 'vase' + model_class_id: 75 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: true # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: true # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_076: + label: 'scissors' + model_class_id: 76 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_077: + label: 'teddy bear' + model_class_id: 77 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. 
Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. 
Default: 0.05 seconds + + class_078: + label: 'hair drier' + model_class_id: 78 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. 
Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds + + class_079: + label: 'toothbrush' + model_class_id: 79 # Class ID of the object in the custom ONNX file (it is not required that this value matches the value in the 'class_XXX' identifier) + enabled: true # Enable/disable the detection of this class + confidence_threshold: 50.0 # Minimum value of the detection confidence of an object [0,99] + is_grounded: false # Provide hypothesis about the object movements (degrees of freedom or DoF) to improve the object tracking + is_static: false # Provide hypothesis about the object staticity to improve the object tracking + tracking_timeout: -1.0 # Maximum tracking time threshold (in seconds) before dropping the tracked object when unseen for this amount of time + tracking_max_dist: -1.0 # Maximum tracking distance threshold (in meters) before dropping the tracked object when unseen for this amount of meters. Only valid for static object + max_box_width_normalized: -1.0 # Maximum allowed width normalized to the image size + min_box_width_normalized: -1.0 # Minimum allowed width normalized to the image size + max_box_height_normalized: -1.0 # Maximum allowed height normalized to the image size + min_box_height_normalized: -1.0 # Minimum allowed height normalized to the image size + max_box_width_meters: -1.0 # Maximum allowed 3D width + min_box_width_meters: -1.0 # Minimum allowed 3D width + max_box_height_meters: -1.0 # Maximum allowed 3D height + min_box_height_meters: -1.0 # Minimum allowed 3D height + object_acceleration_preset: 'DEFAULT' # Object acceleration preset. 
Possible values: 'DEFAULT', 'LOW', 'MEDIUM', 'HIGH' + max_allowed_acceleration: 100000.0 # If set with a different value from the default [100000], this value takes precedence over the selected preset, allowing for a custom maximum acceleration. Unit is m/s^2. + velocity_smoothing_factor: 0.5 # Control the smoothing of the velocity estimation. Values between 0.0 and 1.0. Default: 0.5 + min_velocity_threshold: 0.2 # Threshold to force an object's velocity to zero. Default: 0.2 m/s + prediction_timeout_s: 0.5 # Duration to keep predicting a track's position after occlusion. Default: 0.5 seconds + min_confirmation_time_s: 0.05 # Minimum confirmation time required to validate a track. Default: 0.05 seconds diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/object_detection.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/object_detection.yaml new file mode 100644 index 000000000..42a39a7e4 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/object_detection.yaml @@ -0,0 +1,26 @@ +/**: + ros__parameters: + object_detection: + class: + people: + enabled: true # [DYNAMIC] - Enable/disable the detection of persons + confidence_threshold: 65.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + vehicle: + enabled: true # [DYNAMIC] - Enable/disable the detection of vehicles + confidence_threshold: 60.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + bag: + enabled: true # [DYNAMIC] - Enable/disable the detection of bags + confidence_threshold: 40.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + animal: + enabled: true # [DYNAMIC] - Enable/disable the detection of animals + confidence_threshold: 40.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + electronics: + enabled: true # [DYNAMIC] - Enable/disable the detection of electronic devices + confidence_threshold: 45.0 # [DYNAMIC] - Minimum value of the detection confidence of an object 
[0,99] + fruit_vegetable: + enabled: true # [DYNAMIC] - Enable/disable the detection of fruits and vegetables + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + sport: + enabled: true # [DYNAMIC] - Enable/disable the detection of sport-related objects + confidence_threshold: 30.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99] + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/virtual.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/virtual.yaml new file mode 100644 index 000000000..52ecec8b1 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/virtual.yaml @@ -0,0 +1,27 @@ +# config/zedx.yaml +# Parameters for Stereolabs ZED X camera +--- +/**: + ros__parameters: + general: + camera_model: 'virtual' + camera_name: 'virtual' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. '4K', 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. 
Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 1000.0 diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed.yaml new file mode 100644 index 000000000..a466dd24e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed.yaml @@ -0,0 +1,19 @@ +# config/zed_yaml +# Parameters for Stereolabs ZED camera +--- +/**: + ros__parameters: + general: + camera_model: 'zed' + camera_name: 'zed' # overwritten by launch file + grab_resolution: 'HD720' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. 
Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2.yaml new file mode 100644 index 000000000..2d39fce48 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2.yaml @@ -0,0 +1,20 @@ +# config/zed2_yaml +# Parameters for Stereolabs ZED2 camera +--- +/**: + ros__parameters: + general: + camera_model: 'zed2' + camera_name: 'zed2' # overwritten by launch file + grab_resolution: 'HD720' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2i.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2i.yaml new file mode 100644 index 000000000..e07a59b4a --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zed2i.yaml @@ -0,0 +1,21 @@ +# config/zed2i_yaml +# Parameters for Stereolabs zed2i camera + +--- +/**: + ros__parameters: + general: + camera_model: 'zed2i' + camera_name: 'zed2i' # overwritten by launch file + grab_resolution: 'HD1080' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. 
Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedm.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedm.yaml new file mode 100644 index 000000000..66319a997 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedm.yaml @@ -0,0 +1,20 @@ +# config/zedm_yaml +# Parameters for Stereolabs ZED mini camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedm' + camera_name: 'zedm' # overwritten by launch file + grab_resolution: 'HD1080' # The native camera grab resolution. 'HD2K', 'HD1080', 'HD720', 'VGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate + + video: + brightness: 4 # [DYNAMIC] Image brightness. Range: 0-8 + contrast: 4 # [DYNAMIC] Image contrast. Range: 0-8 + hue: 0 # [DYNAMIC] Image hue. Range: 0 to 11 + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 15.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedx.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedx.yaml new file mode 100644 index 000000000..897b76af5 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedx.yaml @@ -0,0 +1,28 @@ +# config/zedx.yaml +# Parameters for Stereolabs ZED X camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedx' + camera_name: 'zedx' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. 
Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdr.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdr.yaml new file mode 100644 index 000000000..3cf417568 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdr.yaml @@ -0,0 +1,28 @@ +# config/zedxhdr.yaml +# Parameters for Stereolabs ZED X HDR camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdr' + camera_name: 'zedxhdr' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 
'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. 
Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 15.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmax.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmax.yaml new file mode 100644 index 000000000..85df02e0e --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmax.yaml @@ -0,0 +1,28 @@ +# config/zedxhdrmax.yaml +# Parameters for Stereolabs ZED X HDR MAX camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdrmax' + camera_name: 'zedxhdrmax' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. 
Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 20.0 # Min: 0.5, Max: 40.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmini.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmini.yaml new file mode 100644 index 000000000..39200c2d0 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxhdrmini.yaml @@ -0,0 +1,28 @@ +# config/zedxhdrmini.yaml +# Parameters for Stereolabs ZED X HDR MINI camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxhdrmini' + camera_name: 'zedxhdrmini' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) + + video: + exposure_time: 16000 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 30000 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 1255 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. 
Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 1 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxm.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxm.yaml new file mode 100644 index 000000000..833b5dffa --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxm.yaml @@ -0,0 +1,28 @@ +# config/zedxm_yaml +# Parameters for Stereolabs ZED X Mini camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxm' + camera_name: 'zedxm' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + exposure_time: 16666 # Defines the real exposure time in microseconds. Recommended to control manual exposure (instead of `video.exposure` setting) + auto_exposure_time_range_min: 28 # Defines the minimum range of exposure auto control in micro seconds + auto_exposure_time_range_max: 16666 # Defines the maximum range of exposure auto control in micro seconds + exposure_compensation: 50 # Defines the Exposure-target compensation made after auto exposure. 
Reduces the overall illumination target by factor of F-stops. Values range is [0 - 100]. Default value is 50, i.e. no compensation applied + analog_gain: 8000 # Defines the real analog gain (sensor) in mDB. Range [1000-16000]. Recommended to control manual sensor gain (instead of `video.gain` setting) + auto_analog_gain_range_min: 1000 # Defines the minimum range of sensor gain in automatic control + auto_analog_gain_range_max: 16000 # Defines the maximum range of sensor gain in automatic control + digital_gain: 128 # Defines the real digital gain (ISP) as a factor. Range [1-256]. Recommended to control manual ISP gain (instead of `video.gain` setting) + auto_digital_gain_range_min: 1 # Defines the minimum range of digital ISP gain in automatic control + auto_digital_gain_range_max: 256 # Defines the maximum range of digital ISP gain in automatic control + denoising: 50 # Defines the level of denoising applied on both left and right images. Range [0-100] + + depth: + min_depth: 0.01 # Min: 0.01, Max: 3.0 + max_depth: 10.0 # Min: 0.5, Max: 20.0 + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxone4k.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxone4k.yaml new file mode 100644 index 000000000..a8efcaa88 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxone4k.yaml @@ -0,0 +1,14 @@ +# config/zedxone4k.yaml +# Parameters for Stereolabs ZED X One 4K camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxone4k' + camera_name: 'zedxone4k' # overwritten by launch file + grab_resolution: 'QHDPLUS' # The native camera grab resolution. 
'HD4K', 'HD1200', 'QHDPLUS, 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD4K/QHDPLUS: 15 - HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) + + video: + enable_hdr: false # When set to true, the camera will be set in HDR mode if the camera model and resolution allows it + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonegs.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonegs.yaml new file mode 100644 index 000000000..f4d08b002 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonegs.yaml @@ -0,0 +1,10 @@ +# config/zedxonegs.yaml +# Parameters for Stereolabs ZED X One GS camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxonegs' + camera_name: 'zedxonegs' # overwritten by launch file + grab_resolution: 'HD1200' # The native camera grab resolution. 'HD1200', 'HD1080', 'SVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1200/HD1080: 60, 30, 15 - SVGA: 120, 60, 30, 15) \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonehdr.yaml b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonehdr.yaml new file mode 100644 index 000000000..e9f756744 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/config/zedxonehdr.yaml @@ -0,0 +1,10 @@ +# config/zedxonehdr.yaml +# Parameters for Stereolabs ZED X One HDR camera +--- +/**: + ros__parameters: + general: + camera_model: 'zedxonehdr' + camera_name: 'zedxonehdr' # overwritten by launch file + grab_resolution: 'HD1536' # The native camera grab resolution. 
'HD1536', 'XVGA', 'AUTO' + grab_frame_rate: 30 # ZED SDK internal grabbing rate (HD1536: 30, XVGA: 30) \ No newline at end of file diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/launch/zed_camera.launch.py b/src/lib/zed-ros2-wrapper/zed_wrapper/launch/zed_camera.launch.py new file mode 100644 index 000000000..d2469bb94 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/launch/zed_camera.launch.py @@ -0,0 +1,604 @@ +# Copyright 2025 Stereolabs +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +import yaml + +from ament_index_python.packages import get_package_share_directory + +from launch import LaunchDescription +from launch.actions import ( + DeclareLaunchArgument, + OpaqueFunction, + LogInfo +) +from launch.conditions import IfCondition +from launch.substitutions import ( + LaunchConfiguration, + Command, + TextSubstitution +) +from launch_ros.actions import ( + Node, + ComposableNodeContainer, + LoadComposableNodes +) +from launch_ros.descriptions import ComposableNode + +# Enable colored output +os.environ["RCUTILS_COLORIZED_OUTPUT"] = "1" + +# ZED Configurations to be loaded by ZED Node +default_config_common = os.path.join( + get_package_share_directory('zed_wrapper'), + 'config', + 'common' +) + +# Object Detection Configuration to be loaded by ZED Node +default_object_detection_config_path = os.path.join( + get_package_share_directory('zed_wrapper'), + 'config', + 'object_detection.yaml' +) +# Custom Object Detection Configuration to be loaded by ZED Node +default_custom_object_detection_config_path = os.path.join( + get_package_share_directory('zed_wrapper'), + 'config', + 'custom_object_detection.yaml' +) + +# URDF/xacro file to be loaded by the Robot State Publisher node +default_xacro_path = os.path.join( + get_package_share_directory('zed_description'), + 'urdf', + 'zed_descr.urdf.xacro' +) + +# Function to parse array-like launch arguments +def parse_array_param(param): + cleaned = param.replace('[', '').replace(']', '').replace(' ', '') + if not cleaned: + return [] + return cleaned.split(',') + + +def _auto_type(value): + """Convert a string value to the most appropriate Python type.""" + lowered = value.strip().lower() + if lowered == 'true': + return True + if lowered == 'false': + return False + try: + return int(value) + except ValueError: + pass + try: + return float(value) + except ValueError: + pass + return value + + +def _parse_param_overrides(overrides_str): + """Parse semicolon-separated 'key:=value' 
pairs into a dict.""" + result = {} + if not overrides_str: + return result + for pair in overrides_str.split(';'): + pair = pair.strip() + if ':=' in pair: + key, value = pair.split(':=', 1) + result[key.strip()] = _auto_type(value.strip()) + return result + + +def _to_bool(value): + if isinstance(value, bool): + return value + + if isinstance(value, str): + lowered = value.strip().lower() + if lowered == 'true': + return True + if lowered == 'false': + return False + + return None + + +def _find_disable_nitros(node): + if isinstance(node, dict): + ros_params = node.get('ros__parameters') + if isinstance(ros_params, dict): + debug_section = ros_params.get('debug') + if isinstance(debug_section, dict) and 'disable_nitros' in debug_section: + return _to_bool(debug_section['disable_nitros']) + + for value in node.values(): + found = _find_disable_nitros(value) + if found is not None: + return found + + return None + + +def _get_disable_nitros_from_file(file_path): + if not file_path or not os.path.isfile(file_path): + return None + + try: + with open(file_path, 'r', encoding='utf-8') as yaml_file: + data = yaml.safe_load(yaml_file) + + if data is None: + return None + + return _find_disable_nitros(data) + except Exception: + return None + +def launch_setup(context, *args, **kwargs): + return_array = [] + + # Launch configuration variables + node_log_type = LaunchConfiguration('node_log_type') + + svo_path = LaunchConfiguration('svo_path') + publish_svo_clock = LaunchConfiguration('publish_svo_clock') + + enable_ipc = LaunchConfiguration('enable_ipc') + ipc_nitros_conflict_policy = LaunchConfiguration('ipc_nitros_conflict_policy') + use_sim_time = LaunchConfiguration('use_sim_time') + sim_mode = LaunchConfiguration('sim_mode') + sim_address = LaunchConfiguration('sim_address') + sim_port = LaunchConfiguration('sim_port') + + stream_address = LaunchConfiguration('stream_address') + stream_port = LaunchConfiguration('stream_port') + + container_name = 
LaunchConfiguration('container_name') + namespace = LaunchConfiguration('namespace') + camera_name = LaunchConfiguration('camera_name') + camera_model = LaunchConfiguration('camera_model') + + node_name = LaunchConfiguration('node_name') + + ros_params_override_path = LaunchConfiguration('ros_params_override_path') + object_detection_config_path = LaunchConfiguration('object_detection_config_path') + custom_object_detection_config_path = LaunchConfiguration('custom_object_detection_config_path') + param_overrides = LaunchConfiguration('param_overrides') + + serial_number = LaunchConfiguration('serial_number') + camera_id = LaunchConfiguration('camera_id') + + serial_numbers = LaunchConfiguration('serial_numbers') + camera_ids = LaunchConfiguration('camera_ids') + + publish_urdf = LaunchConfiguration('publish_urdf') + publish_tf = LaunchConfiguration('publish_tf') + publish_map_tf = LaunchConfiguration('publish_map_tf') + publish_imu_tf = LaunchConfiguration('publish_imu_tf') + xacro_path = LaunchConfiguration('xacro_path') + + enable_gnss = LaunchConfiguration('enable_gnss') + gnss_antenna_offset = LaunchConfiguration('gnss_antenna_offset') + + node_log_type_val = node_log_type.perform(context) + container_name_val = container_name.perform(context) + namespace_val = namespace.perform(context) + camera_name_val = camera_name.perform(context) + camera_model_val = camera_model.perform(context) + node_name_val = node_name.perform(context) + enable_gnss_val = enable_gnss.perform(context) + gnss_coords = parse_array_param(gnss_antenna_offset.perform(context)) + serial_numbers_val = serial_numbers.perform(context) + camera_ids_val = camera_ids.perform(context) + + stereo_models = ( + 'zed', 'zedm', 'zed2', 'zed2i', + 'zedx', 'zedxm', + 'zedxhdr', 'zedxhdrmini', 'zedxhdrmax', + 'virtual' + ) + is_stereo_model = camera_model_val in stereo_models + + if(node_log_type_val == 'both'): + node_log_effective = 'both' + else: # 'screen' or 'log' + node_log_effective = { + 
'stdout': node_log_type_val, + 'stderr': node_log_type_val + } + + if (camera_name_val == ''): + camera_name_val = 'zed' + + if (camera_model_val == 'virtual'): + # Virtual Stereo Camera setup + serials = parse_array_param(serial_numbers_val) + ids = parse_array_param(camera_ids_val) + + # If not in live mode, at least one of serials or ids must be a valid 2-values array + if(len(serials) != 2 and len(ids) != 2 and svo_path.perform(context) == 'live'): + return [ + LogInfo(msg=TextSubstitution( + text='With a Virtual Stereo Camera setup, one of `serial_numbers` or `camera_ids` launch arguments must contain two valid values (Left and Right camera identification).')) + ] + + if(namespace_val == ''): + namespace_val = camera_name_val + else: + node_name_val = camera_name_val + + # Common configuration file + if is_stereo_model: + config_common_path_val = default_config_common + '_stereo.yaml' + else: + config_common_path_val = default_config_common + '_mono.yaml' + + info = 'Using common configuration file: ' + config_common_path_val + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Camera configuration file + config_camera_path = os.path.join( + get_package_share_directory('zed_wrapper'), + 'config', + camera_model_val + '.yaml' + ) + + info = 'Using camera configuration file: ' + config_camera_path + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Object Detection configuration file + info = 'Using Object Detection configuration file: ' + object_detection_config_path.perform(context) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # Custom Object Detection configuration file + info = 'Using Custom Object Detection configuration file: ' + custom_object_detection_config_path.perform(context) + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + # ROS parameters override file + ros_params_override_path_val = ros_params_override_path.perform(context) + if(ros_params_override_path_val != ''): + 
info = 'Using ROS parameters override file: ' + ros_params_override_path_val + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + enable_ipc_effective = enable_ipc.perform(context) == 'true' + ipc_nitros_conflict_policy_val = ipc_nitros_conflict_policy.perform(context) + + # Effective `debug.disable_nitros` value from loaded YAML files. + # Component defaults differ if parameter is not present. + # - ZedCamera (stereo/virtual): default true + # - ZedCameraOne (mono): default false + nitros_disabled_effective = True if is_stereo_model else False + + common_disable_nitros = _get_disable_nitros_from_file(config_common_path_val) + if common_disable_nitros is not None: + nitros_disabled_effective = common_disable_nitros + + if ros_params_override_path_val != '': + override_disable_nitros = _get_disable_nitros_from_file(ros_params_override_path_val) + if override_disable_nitros is not None: + nitros_disabled_effective = override_disable_nitros + + # Parse inline parameter overrides from command line + param_overrides_dict = _parse_param_overrides(param_overrides.perform(context)) + + # Check if disable_nitros is set via inline overrides + inline_disable_nitros = _to_bool(param_overrides_dict.get('debug.disable_nitros')) + if inline_disable_nitros is not None: + nitros_disabled_effective = inline_disable_nitros + + if enable_ipc_effective and not nitros_disabled_effective: + conflict_msg = ( + 'Invalid configuration: `enable_ipc:=true` with ' + '`debug.disable_nitros:=false` can cause NITROS startup failure ' + '(volatile durability conflict).' 
+ ) + + if ipc_nitros_conflict_policy_val == 'disable_ipc': + enable_ipc_effective = False + return_array.append(LogInfo(msg=TextSubstitution( + text='WARNING: ' + conflict_msg + ' Forcing `enable_ipc:=false` for this launch.'))) + else: + raise RuntimeError(conflict_msg + ' Set `enable_ipc:=false` or `debug.disable_nitros:=true`.') + + # Xacro command with options + xacro_command = [] + xacro_command.append('xacro') + xacro_command.append(' ') + xacro_command.append(xacro_path.perform(context)) + xacro_command.append(' ') + xacro_command.append('camera_name:=') + xacro_command.append(camera_name_val) + xacro_command.append(' ') + xacro_command.append('camera_model:=') + xacro_command.append(camera_model_val) + xacro_command.append(' ') + if(enable_gnss_val=='true'): + xacro_command.append(' ') + xacro_command.append('enable_gnss:=true') + xacro_command.append(' ') + if(len(gnss_coords)==3): + xacro_command.append('gnss_x:=') + xacro_command.append(gnss_coords[0]) + xacro_command.append(' ') + xacro_command.append('gnss_y:=') + xacro_command.append(gnss_coords[1]) + xacro_command.append(' ') + xacro_command.append('gnss_z:=') + xacro_command.append(gnss_coords[2]) + xacro_command.append(' ') + + # Robot State Publisher node + rsp_name = camera_name_val + '_state_publisher' + rsp_node = Node( + condition=IfCondition(publish_urdf), + package='robot_state_publisher', + namespace=namespace_val, + executable='robot_state_publisher', + name=rsp_name, + output=node_log_effective, + parameters=[{ + 'use_sim_time': publish_svo_clock, + 'robot_description': Command(xacro_command) + }], + remappings=[('robot_description', camera_name_val+'_description')] + ) + return_array.append(rsp_node) + + # ROS 2 Component Container + if(container_name_val == ''): + container_name_val='zed_container' + distro = os.environ['ROS_DISTRO'] + if distro == 'foxy': + # Foxy does not support the isolated mode + container_exec='component_container' + arguments_val=['--ros-args', '--log-level', 
'info'] + else: + container_exec='component_container_isolated' + arguments_val=['--use_multi_threaded_executor','--ros-args', '--log-level', 'info'] + #arguments_val=['--use_multi_threaded_executor','--ros-args', '--log-level', 'debug'] + + zed_container = ComposableNodeContainer( + name=container_name_val, + namespace=namespace_val, + package='rclcpp_components', + executable=container_exec, + arguments=arguments_val, + output=node_log_effective, + composable_node_descriptions=[] + ) + return_array.append(zed_container) + + # ZED Node parameters + node_parameters = [ + # YAML files + config_common_path_val, # Common parameters + config_camera_path, # Camera related parameters + object_detection_config_path, # Object detection parameters + custom_object_detection_config_path # Custom object detection parameters + ] + + if( ros_params_override_path_val != ''): + node_parameters.append(ros_params_override_path) + + node_parameters.append( + # Launch arguments must override the YAML files values + { + 'use_sim_time': use_sim_time, + 'simulation.sim_enabled': sim_mode, + 'simulation.sim_address': sim_address, + 'simulation.sim_port': sim_port, + 'stream.stream_address': stream_address, + 'stream.stream_port': stream_port, + 'general.camera_name': camera_name_val, + 'general.camera_model': camera_model_val, + 'svo.svo_path': svo_path, + 'svo.publish_svo_clock': publish_svo_clock, + 'general.serial_number': serial_number, + 'general.camera_id': camera_id, + 'pos_tracking.publish_tf': publish_tf, + 'pos_tracking.publish_map_tf': publish_map_tf, + 'sensors.publish_imu_tf': publish_imu_tf, + 'gnss_fusion.gnss_fusion_enabled': enable_gnss, + 'general.virtual_serial_numbers': serial_numbers_val, + 'general.virtual_camera_ids': camera_ids_val + } + ) + + # Append inline parameter overrides (highest priority) + if param_overrides_dict: + node_parameters.append(param_overrides_dict) + + # ZED Wrapper component + if is_stereo_model: + zed_wrapper_component = ComposableNode( + 
package='zed_components', + namespace=namespace_val, + plugin='stereolabs::ZedCamera', + name=node_name_val, + parameters=node_parameters, + extra_arguments=[{'use_intra_process_comms': enable_ipc_effective}] + ) + else: # camera_model_val == 'zedxonegs' or camera_model_val == 'zedxone4k' or camera_model_val == 'zedxonehdr' + zed_wrapper_component = ComposableNode( + package='zed_components', + namespace=namespace_val, + plugin='stereolabs::ZedCameraOne', + name=node_name_val, + parameters=node_parameters, + extra_arguments=[{'use_intra_process_comms': enable_ipc_effective}] + ) + + full_container_name = '/' + namespace_val + '/' + container_name_val + info = 'Loading ZED node `' + node_name_val + '` in container `' + full_container_name + '`' + return_array.append(LogInfo(msg=TextSubstitution(text=info))) + + load_composable_node = LoadComposableNodes( + target_container=full_container_name, + composable_node_descriptions=[zed_wrapper_component] + ) + return_array.append(load_composable_node) + + return return_array + +def generate_launch_description(): + return LaunchDescription( + [ + # Declare launch arguments + DeclareLaunchArgument( + 'node_log_type', + default_value=TextSubstitution(text='both'), + description='The log type of the node. It can be `screen`, `log` or `both`. The `log` type will save the log in a file in the `~/.ros/log/` folder. The `screen` type will print the log on the terminal. The `both` type will do both.', + choices=['screen', 'log', 'both']), + DeclareLaunchArgument( + 'camera_name', + default_value=TextSubstitution(text='zed'), + description='The name of the camera. It can be different from the camera model and it will be used as node `namespace`.'), + DeclareLaunchArgument( + 'camera_model', + description='[REQUIRED] The model of the camera. 
Using a wrong camera model can disable camera features.', + choices=['zed', 'zedm', 'zed2', 'zed2i', 'zedx', 'zedxm', 'zedxhdr', 'zedxhdrmini', 'zedxhdrmax', 'virtual', 'zedxonegs', 'zedxone4k', 'zedxonehdr']), + DeclareLaunchArgument( + 'container_name', + default_value='', + description='The name of the container to be used to load the ZED component. If empty (default) a new container will be created.'), + DeclareLaunchArgument( + 'namespace', + default_value='', + description='The namespace of the node. If empty (default) the camera name is used.'), + DeclareLaunchArgument( + 'node_name', + default_value='zed_node', + description='The name of the zed_wrapper node. All the topic will have the same prefix: `///`. If a namespace is specified, the node name is replaced by the camera name.'), + DeclareLaunchArgument( + 'ros_params_override_path', + default_value='', + description='The path to an additional parameters file to override the default values.'), + DeclareLaunchArgument( + 'object_detection_config_path', + default_value=TextSubstitution(text=default_object_detection_config_path), + description='Path to the YAML configuration file for the Object Detection parameters.'), + DeclareLaunchArgument( + 'custom_object_detection_config_path', + default_value=TextSubstitution(text=default_custom_object_detection_config_path), + description='Path to the YAML configuration file for the Custom Object Detection parameters.'), + DeclareLaunchArgument( + 'serial_number', + default_value='0', + description='The serial number of the camera to be opened. It is mandatory to use this parameter or camera ID in multi-camera rigs to distinguish between different cameras. Use `ZED_Explorer -a` to retrieve the serial number of all the connected cameras.'), + DeclareLaunchArgument( + 'serial_numbers', + default_value='[]', + description='The serial numbers of the two cameras to be opened to compose a Virtual Stereo Camera, [left_sn,right_sn]. 
Use `ZED_Explorer -a` to retrieve the serial number of all the connected cameras.'), + DeclareLaunchArgument( + 'camera_id', + default_value='-1', + description='The ID of the camera to be opened. It is mandatory to use this parameter or serial number in multi-camera rigs to distinguish between different cameras. Use `ZED_Explorer -a` to retrieve the ID of all the connected cameras.'), + DeclareLaunchArgument( + 'camera_ids', + default_value='[]', + description='The IDs of the two cameras to be opened to compose a Virtual Stereo Camera, [left_id,right_id]. Use `ZED_Explorer -a` to retrieve the ID of all the connected cameras.'), + DeclareLaunchArgument( + 'publish_urdf', + default_value='true', + description='Enable URDF processing and starts Robot State Published to propagate static TF.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_tf', + default_value='true', + description='Enable publication of the `odom -> camera_link` TF.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_map_tf', + default_value='true', + description='Enable publication of the `map -> odom` TF. Note: Ignored if `publish_tf` is False.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'publish_imu_tf', + default_value='false', + description='Enable publication of the IMU TF. Note: Ignored if `publish_tf` is False.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'xacro_path', + default_value=TextSubstitution(text=default_xacro_path), + description='Path to the camera URDF file as a xacro file.'), + DeclareLaunchArgument( + 'svo_path', + default_value=TextSubstitution(text='live'), + description='Path to an input SVO file.'), + DeclareLaunchArgument( + 'publish_svo_clock', + default_value='false', + description='If set to `true` the node will act as a clock server publishing the SVO timestamp. 
This is useful for node synchronization'), + DeclareLaunchArgument( + 'enable_gnss', + default_value='false', + description='Enable GNSS fusion to fix positional tracking pose with GNSS data from messages of type `sensor_msgs::msg::NavSatFix`. The fix topic can be customized in `common_stereo.yaml`.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'gnss_antenna_offset', + default_value='[]', + description='Position of the GNSS antenna with respect to the mounting point of the ZED camera. Format: [x,y,z]'), + DeclareLaunchArgument( + 'enable_ipc', + default_value='true', + description='Enable intra-process communication (IPC) with ROS 2 Composition', + choices=['true', 'false']), + DeclareLaunchArgument( + 'ipc_nitros_conflict_policy', + default_value='disable_ipc', + description='Behavior when IPC is enabled and NITROS is enabled (`debug.disable_nitros:=false`). ' + '`disable_ipc`: force IPC off for this launch. ' + '`exit`: stop launch with an error.', + choices=['disable_ipc', 'exit']), + DeclareLaunchArgument( + 'use_sim_time', + default_value='false', + description='If set to `true` the node will wait for messages on the `/clock` topic to start and will use this information as the timestamp reference', + choices=['true', 'false']), + DeclareLaunchArgument( + 'sim_mode', + default_value='false', + description='Enable simulation mode. Set `sim_address` and `sim_port` to configure the simulator input.', + choices=['true', 'false']), + DeclareLaunchArgument( + 'sim_address', + default_value='127.0.0.1', + description='The connection address of the simulation server. See the documentation of the supported simulation plugins for more information.'), + DeclareLaunchArgument( + 'sim_port', + default_value='30000', + description='The connection port of the simulation server. 
See the documentation of the supported simulation plugins for more information.'), + DeclareLaunchArgument( + 'stream_address', + default_value='', + description='The connection address of the input streaming server.'), + DeclareLaunchArgument( + 'stream_port', + default_value='30000', + description='The connection port of the input streaming server.'), + DeclareLaunchArgument( + 'param_overrides', + default_value='', + description='Inline parameter overrides as semicolon-separated key:=value pairs. ' + 'These have the highest priority and override all YAML and launch argument values. ' + 'Example: "debug.disable_nitros:=true;object_detection.custom_onnx_file:=/path/to/model.onnx"'), + OpaqueFunction(function=launch_setup) + ] + ) diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/package.xml b/src/lib/zed-ros2-wrapper/zed_wrapper/package.xml new file mode 100644 index 000000000..a081ea434 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/package.xml @@ -0,0 +1,38 @@ + + + + zed_wrapper + 5.2.2 + zed_wrapper loading zed_components in a single process + STEREOLABS + Apache License 2.0 + https://www.stereolabs.com/ + https://github.com/stereolabs/zed-ros2-wrapper + https://github.com/stereolabs/zed-ros2-wrapper/issues + + ament_cmake + + ament_cmake_auto + + zed_components + + launch_ros + zed_description + xacro + image_transport_plugins + compressed_image_transport + compressed_depth_image_transport + theora_image_transport + + ament_lint_auto + ament_cmake_copyright + ament_cmake_cppcheck + ament_cmake_lint_cmake + ament_cmake_pep257 + ament_cmake_uncrustify + ament_cmake_xmllint + + + ament_cmake + + diff --git a/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_descr.urdf.xacro b/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_descr.urdf.xacro new file mode 100644 index 000000000..213af56fd --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_descr.urdf.xacro @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + diff --git 
a/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_macro.urdf.xacro b/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_macro.urdf.xacro new file mode 100644 index 000000000..bcb065b27 --- /dev/null +++ b/src/lib/zed-ros2-wrapper/zed_wrapper/urdf/zed_macro.urdf.xacro @@ -0,0 +1,315 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +