From 085313221d924d5969ad1842feb4e30f298ce8f0 Mon Sep 17 00:00:00 2001
From: builder
Date: Tue, 10 Feb 2026 15:01:53 +0000
Subject: [PATCH] Samples: Add --settings-path option to multi-camera samples

Allow the multi-camera calibration and stitching samples to load camera
settings from a user-provided YML file instead of the bundled defaults,
fix the point cloud size reporting, and update the source line
references in the point cloud tutorial.
---
 .../multi_camera/multi_camera_calibration.py  | 29 ++++++-
 .../multi_camera/stitch_by_transformation.py  | 79 ++++++++++++++++---
 .../stitch_by_transformation_from_zdf.py      |  8 +-
 source/applications/point_cloud_tutorial.md   | 10 +--
 4 files changed, 105 insertions(+), 21 deletions(-)

diff --git a/source/applications/advanced/multi_camera/multi_camera_calibration.py b/source/applications/advanced/multi_camera/multi_camera_calibration.py
index c1af64c8..f6e8383d 100644
--- a/source/applications/advanced/multi_camera/multi_camera_calibration.py
+++ b/source/applications/advanced/multi_camera/multi_camera_calibration.py
@@ -16,17 +16,36 @@ def _args() -> argparse.Namespace:
     Returns:
         Arguments from the user
+
     """
     parser = argparse.ArgumentParser(description="Multi-camera calibration using Zivid cameras.")
+
     parser.add_argument(
         "transformation_matrices_save_path",
+        type=Path,
         help="Path where the transformation matrices YAML files will be saved",
+    )
+
+    parser.add_argument(
+        "--settings-path",
+        required=False,
         type=Path,
+        help="Path to the camera settings YML file",
     )
+
     return parser.parse_args()


 def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
+    """Get a list of available cameras and connect to them.
+
+    Args:
+        cameras: List of Zivid cameras
+
+    Returns:
+        List of connected Zivid cameras
+
+    """
     connected_cameras = []
     for camera in cameras:
         if camera.state.status == zivid.CameraState.Status.available:
@@ -44,12 +63,16 @@ class Detection:
     detection_result: zivid.calibration.DetectionResult


-def get_detections(connected_cameras: List[zivid.Camera]) -> List[Detection]:
+def get_detections(connected_cameras: List[zivid.Camera], settings_path: Path) -> List[Detection]:
     detections_list = []
     for camera in connected_cameras:
         serial = camera.info.serial_number
         print(f"Capturing frame with camera: {serial}")
-        frame = zivid.calibration.capture_calibration_board(camera)
+        if settings_path is None:
+            frame = zivid.calibration.capture_calibration_board(camera)
+        else:
+            settings = zivid.Settings.load(settings_path)
+            frame = camera.capture_2d_3d(settings)
         print("Detecting checkerboard in point cloud")
         detection_result = zivid.calibration.detect_calibration_board(frame)
         if detection_result:
@@ -93,7 +116,7 @@ def main() -> None:
         raise RuntimeError("At least two cameras need to be connected")

     print(f"Number of connected cameras: {len(connected_cameras)}")
-    detections = get_detections(connected_cameras)
+    detections = get_detections(connected_cameras, args.settings_path)

     run_multi_camera_calibration(detections, args.transformation_matrices_save_path)
description="Stitch point clouds from multiple Zivid cameras using transformation matrices." ) + parser.add_argument( "yaml_files", type=Path, nargs="+", help="YAML files containing the corresponding transformation matrices (one per camera).", ) + + parser.add_argument( + "-o", + "--output-file", + required=False, + type=Path, + help="Save the stitched point cloud to a file with this name (.ply)", + ) + parser.add_argument( - "-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)" + "--settings-path", + required=False, + type=Path, + help="Path to the camera settings YML file", ) + return parser.parse_args() +def sanitized_model_name(camera: zivid.Camera) -> str: + """Get a string that represents the camera model name. + + Args: + camera: Zivid camera + + Raises: + RuntimeError: If unsupported camera model for this code sample + + Returns: + A string representing the camera model name + + """ + model = camera.info.model + + model_map = { + zivid.CameraInfo.Model.zividTwo: "Zivid_Two_M70", + zivid.CameraInfo.Model.zividTwoL100: "Zivid_Two_L100", + zivid.CameraInfo.Model.zivid2PlusM130: "Zivid_Two_Plus_M130", + zivid.CameraInfo.Model.zivid2PlusM60: "Zivid_Two_Plus_M60", + zivid.CameraInfo.Model.zivid2PlusL110: "Zivid_Two_Plus_L110", + zivid.CameraInfo.Model.zivid2PlusMR130: "Zivid_Two_Plus_MR130", + zivid.CameraInfo.Model.zivid2PlusMR60: "Zivid_Two_Plus_MR60", + zivid.CameraInfo.Model.zivid2PlusLR110: "Zivid_Two_Plus_LR110", + zivid.CameraInfo.Model.zivid3XL250: "Zivid_Three_XL250", + } + if model not in model_map: + raise RuntimeError(f"Unhandled camera model: {camera.info().model().to_string()}") + + return model_map[model] + + def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]: + """get a list of available cameras and connect to them. 
diff --git a/source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py b/source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py
index 3f6b1b08..af85312d 100644
--- a/source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py
+++ b/source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py
@@ -30,7 +30,11 @@ def _user_arguments() -> argparse.Namespace:
         type=Path,
     )
     parser.add_argument(
-        "-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)."
+        "-o",
+        "--output-file",
+        required=False,
+        type=Path,
+        help="Save the stitched point cloud to a file with this name (.ply).",
     )
     return parser.parse_args()
@@ -95,7 +99,7 @@ def main() -> None:
     print(f"Visualizing the stitched point cloud ({final_point_cloud.size} data points)")
     display_point_cloud(final_point_cloud)

-    if args.output_file:
+    if args.output_file is not None:
         print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
         export_unorganized_point_cloud(
             final_point_cloud,
+ "-o", + "--output-file", + required=False, + type=Path, + help="Save the stitched point cloud to a file with this name (.ply).", ) return parser.parse_args() @@ -95,7 +99,7 @@ def main() -> None: print(f"Visualizing the stitched point cloud ({final_point_cloud.size}) data points)") display_point_cloud(final_point_cloud) - if args.output_file: + if args.output_file is not None: print(f"Saving {final_point_cloud.size} data points to {args.output_file}") export_unorganized_point_cloud( final_point_cloud, diff --git a/source/applications/point_cloud_tutorial.md b/source/applications/point_cloud_tutorial.md index 8c44d395..b4ab9841 100644 --- a/source/applications/point_cloud_tutorial.md +++ b/source/applications/point_cloud_tutorial.md @@ -143,7 +143,7 @@ point cloud. While doing so, all NaN values are removed, and the point cloud is flattened to a 1D array. ([go to -source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L109)) +source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L166)) ``` sourceCode python unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud() @@ -155,7 +155,7 @@ The unorganized point cloud can be extended with additional unorganized point clouds. ([go to -source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76)) +source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80)) ``` sourceCode python stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix)) @@ -227,7 +227,7 @@ that in this sample is is not necessary to create a new instance, as the untransformed point cloud is not used after the transformation. ([go to -source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L111)) +source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L168)) ``` sourceCode python transformed_unorganized_point_cloud = unorganized_point_cloud.transformed(transformation_matrix) @@ -237,7 +237,7 @@ Even the in-place API returns the transformed point cloud, so you can use it directly, as in the example below. ([go to -source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76)) +source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80)) ``` sourceCode python stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix)) @@ -327,7 +327,7 @@ minPointsPerVoxel can be used to only fill voxels that both captures "agree" on. ([go to -source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L115)) +source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L172)) ``` sourceCode python final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)