Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -16,17 +16,36 @@ def _args() -> argparse.Namespace:

Returns:
Arguments from the user

"""
parser = argparse.ArgumentParser(description="Multi-camera calibration using Zivid cameras.")

parser.add_argument(
"transformation_matrices_save_path",
type=Path,
help="Path where the transformation matrices YAML files will be saved",
)

parser.add_argument(
"--settings-path",
required=False,
type=Path,
help="Path to the camera settings YML file",
)

return parser.parse_args()


def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
"""get a list of available cameras and connect to them.

Args:
cameras: List of Zivid cameras

Returns:
List of connected Zivid cameras

"""
connected_cameras = []
for camera in cameras:
if camera.state.status == zivid.CameraState.Status.available:
Expand All @@ -44,12 +63,16 @@ class Detection:
detection_result: zivid.calibration.DetectionResult


def get_detections(connected_cameras: List[zivid.Camera]) -> List[Detection]:
def get_detections(connected_cameras: List[zivid.Camera], settings_path: Path) -> List[Detection]:
detections_list = []
for camera in connected_cameras:
serial = camera.info.serial_number
print(f"Capturing frame with camera: {serial}")
frame = zivid.calibration.capture_calibration_board(camera)
if settings_path is None:
frame = zivid.calibration.capture_calibration_board(camera)
else:
settings = zivid.Settings.load(settings_path)
frame = camera.capture_2d_3d(settings)
print("Detecting checkerboard in point cloud")
detection_result = zivid.calibration.detect_calibration_board(frame)
if detection_result:
Expand Down Expand Up @@ -93,7 +116,7 @@ def main() -> None:
raise RuntimeError("At least two cameras need to be connected")
print(f"Number of connected cameras: {len(connected_cameras)}")

detections = get_detections(connected_cameras)
detections = get_detections(connected_cameras, args.settings_path)
run_multi_camera_calibration(detections, args.transformation_matrices_save_path)


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,19 +25,74 @@ def _user_arguments() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Stitch point clouds from multiple Zivid cameras using transformation matrices."
)

parser.add_argument(
"yaml_files",
type=Path,
nargs="+",
help="YAML files containing the corresponding transformation matrices (one per camera).",
)

parser.add_argument(
"-o",
"--output-file",
required=False,
type=Path,
help="Save the stitched point cloud to a file with this name (.ply)",
)

parser.add_argument(
"-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)"
"--settings-path",
required=False,
type=Path,
help="Path to the camera settings YML file",
)

return parser.parse_args()


def sanitized_model_name(camera: zivid.Camera) -> str:
    """Get a string that represents the camera model name.

    Args:
        camera: Zivid camera

    Raises:
        RuntimeError: If unsupported camera model for this code sample

    Returns:
        A string representing the camera model name

    """
    model = camera.info.model

    # Maps each supported camera model enum to the file-name-safe string used
    # to locate the per-model settings YML (e.g. "Zivid_Two_M70_....yml").
    model_map = {
        zivid.CameraInfo.Model.zividTwo: "Zivid_Two_M70",
        zivid.CameraInfo.Model.zividTwoL100: "Zivid_Two_L100",
        zivid.CameraInfo.Model.zivid2PlusM130: "Zivid_Two_Plus_M130",
        zivid.CameraInfo.Model.zivid2PlusM60: "Zivid_Two_Plus_M60",
        zivid.CameraInfo.Model.zivid2PlusL110: "Zivid_Two_Plus_L110",
        zivid.CameraInfo.Model.zivid2PlusMR130: "Zivid_Two_Plus_MR130",
        zivid.CameraInfo.Model.zivid2PlusMR60: "Zivid_Two_Plus_MR60",
        zivid.CameraInfo.Model.zivid2PlusLR110: "Zivid_Two_Plus_LR110",
        zivid.CameraInfo.Model.zivid3XL250: "Zivid_Three_XL250",
    }
    if model not in model_map:
        # Bug fix: `info` and `model` are properties (see `camera.info.model`
        # above and `camera.info.serial_number` elsewhere in these samples), so
        # the previous `camera.info().model().to_string()` would raise
        # TypeError instead of the intended RuntimeError. Use the already-read
        # `model` value in the message.
        raise RuntimeError(f"Unhandled camera model: {model}")

    return model_map[model]


def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
"""get a list of available cameras and connect to them.

Args:
cameras: List of Zivid cameras

Returns:
List of connected Zivid cameras

"""
connected_cameras = []
for camera in cameras:
if camera.state.status == zivid.CameraState.Status.available:
Expand All @@ -60,10 +115,11 @@ def get_transformation_matrices_from_yaml(
cameras: List of connected Zivid cameras

Returns:
A dictionary mapping camera serial numbers to their corresponding transformation matrices
transforms_mapped_to_cameras: A dictionary mapping camera serial numbers to their corresponding transformation matrices

Raises:
RuntimeError: If a YAML file for a camera is missing

"""
transforms_mapped_to_cameras = {}
for camera in cameras:
Expand Down Expand Up @@ -100,11 +156,12 @@ def main() -> None:

# DOCTAG-START-CAPTURE-AND-STITCH-POINT-CLOUDS-PART1
for camera in connected_cameras:
settings_path = (
get_sample_data_path()
/ "Settings"
/ f"{camera.info.model_name.replace('2+', 'Two_Plus').replace('2', 'Two').replace('3', 'Three').replace(' ', '_')}_ManufacturingSpecular.yml"
)
if args.settings_path is not None:
settings_path = args.settings_path
else:
settings_path = (
get_sample_data_path() / "Settings" / f"{sanitized_model_name(camera)}_ManufacturingSpecular.yml"
)
print(f"Imaging from camera: {camera.info.serial_number}")
frame = camera.capture(zivid.Settings.load(settings_path))
unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
Expand All @@ -115,13 +172,13 @@ def main() -> None:
print("Voxel-downsampling the stitched point cloud")
final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)

print(f"Visualizing the stitched point cloud ({len(final_point_cloud.size())} data points)")
print(f"Visualizing the stitched point cloud ({final_point_cloud.size} data points)")
display_pointcloud(final_point_cloud)

if args.output_file:
print(f"Saving {len(final_point_cloud.size())} data points to {args.output_file}")
if args.output_file is not None:
print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
export_unorganized_point_cloud(
final_point_cloud, PLY(args.output_file, layout=PLY.Layout.unordered, color_space=ColorSpace.srgb)
final_point_cloud, PLY(str(args.output_file), layout=PLY.Layout.unordered, color_space=ColorSpace.srgb)
)


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,11 @@ def _user_arguments() -> argparse.Namespace:
type=Path,
)
parser.add_argument(
"-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)."
"-o",
"--output-file",
required=False,
type=Path,
help="Save the stitched point cloud to a file with this name (.ply).",
)
return parser.parse_args()

Expand Down Expand Up @@ -95,7 +99,7 @@ def main() -> None:
print(f"Visualizing the stitched point cloud ({final_point_cloud.size}) data points)")
display_point_cloud(final_point_cloud)

if args.output_file:
if args.output_file is not None:
print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
export_unorganized_point_cloud(
final_point_cloud,
Expand Down
10 changes: 5 additions & 5 deletions source/applications/point_cloud_tutorial.md
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ point cloud. While doing so, all NaN values are removed, and the point
cloud is flattened to a 1D array.

([go to
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L109))
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L166))

``` sourceCode python
unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
Expand All @@ -155,7 +155,7 @@ The unorganized point cloud can be extended with additional unorganized
point clouds.

([go to
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76))
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80))

``` sourceCode python
stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix))
Expand Down Expand Up @@ -227,7 +227,7 @@ that in this sample is is not necessary to create a new instance, as the
untransformed point cloud is not used after the transformation.

([go to
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L111))
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L168))

``` sourceCode python
transformed_unorganized_point_cloud = unorganized_point_cloud.transformed(transformation_matrix)
Expand All @@ -237,7 +237,7 @@ Even the in-place API returns the transformed point cloud, so you can
use it directly, as in the example below.

([go to
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L76))
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py#L80))

``` sourceCode python
stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix))
Expand Down Expand Up @@ -327,7 +327,7 @@ minPointsPerVoxel can be used to only fill voxels that both captures
"agree" on.

([go to
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L115))
source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/advanced/multi_camera/stitch_by_transformation.py#L172))

``` sourceCode python
final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)
Expand Down