4. Python examples

These examples are all shipped with the owl python package. You can find them in the examples directory inside the owl directory of your installation.

The examples are located inside:

/home/ramona/falcongui/lib/python3.1/site-packages/owl/examples

Synchronous acquisition

 1"""This script shows you how to acquire an image and save it to a folder
 2using the built in transmission illumination board for illumination.
 3
 4Ramona Optics, Inc.
 5Copyright 2018-2025
 6"""
 7from owl import mcam_data
 8from owl.instruments import MCAM
 9from owl.util import sys_info as owl_sys_info
10
11print(owl_sys_info())
12
13# Open the mcam
14mcam = MCAM(serial_number='Kestrel0101R')
15
16# 100 ms is the default exposure
17mcam.exposure = 0.1
18mcam.set_illumination_brightness(.5)
19
20dataset = mcam.acquire_full_field_of_view()
21# Save the data
22raw_data_file = mcam_data.save(dataset, 'demo')
23
24# Close after the data has been acquired and saved
25mcam.close()

Controlling Fluorescence Units Programmatically

 1"""
 2This script shows how to control the fluorescence illumination controlled
 3digitally in a script.
 4It requires owl version 0.9.30 or greater
 5
 6Copyright Ramona Optics Inc, 2019-2025. All rights reserved.
 7"""
 8from owl.instruments import MCAM
 9
10mcam = MCAM(serial_number='Kestrel0053R')
11fluorescence = mcam.fluorescence_illumination
12
13# Enable the first LED (or the only LED if only one is connected)
14fluorescence.led_selected = 0
15
16# Set to 25 percent power
17fluorescence.led_power_percentage = 25
18
19# Set to 60 percent power
20fluorescence.led_power_percentage = 60
21
22# Set to 0 percent power
23# This makes the LEDs appear off.
24# Power below 10 percent may also manifest itself as being off
25fluorescence.led_power_percentage = 0
26
27# Turn off and deselect the LEDs (safer, more proper way of turning off LEDs)
28fluorescence.clear()
29
30# Read the temperature recorded by all the temperature sensors in the system
31temperatures = fluorescence.temperatures

Z Stack acquisition with Web API

 1"""Z stack acquisition with the Ramona Optics MCAM using the web API.
 2
 3This example demonstrates how to acquire a Z stack acquisition using the MCAM web API.
 4
 5The Z stack assay allows for the acquisition of a series of images at different focal planes.
 6The assay is configured using json with the following parameters.
 7
 8The `save_location` parameter specifies the location where the data will be saved on the
 9MCAM computer or NAS (Network Attached Storage).
10
11The `metadata` parameter is a dictionary that can be used to store any additional information
12you would like to associate with the assay.
13
14The `parameters` parameter is a dictionary that contains the parameters for the assay.
15A full list of parameters can be found in the MCAM documentation.
16https://docs.ramonaoptics.com/models.html#z-stack-assay
17
18The `configuration` parameter is a string that specifies the file path to a configuration file
19generated by the MCAM software.
20This file contains the parameters for the assay and is generally stored on the MCAM computer
21or on a NAS.
22
23Ramona Optics, Inc. Copyright 2023-2025
24"""
25import requests
26
27# Change the serial number to the one provided to you by Ramona Optics
28serial_number = "Kestrel0001"
29url = "http://localhost:8800"
30
31# Connect to the MCAM device using the webAPI endpoint
32requests.post(f"{url}/v1/mcam/{serial_number}", timeout=10)
33
34# Set the MCAM device to sample loading state, which will extend the plate nest
35requests.post(
36    f"{url}/v1/mcam/{serial_number}/state",
37    json={"state": "sample_loading"},
38    timeout=10,
39)
40
41# Set the MCAM device to the acquisition state, which will insert the plate nest
42requests.post(
43    f"{url}/v1/mcam/{serial_number}/state",
44    json={"state": "acquisition"},
45    timeout=10,
46)
47
48assay_parameters = {
49    "save_location": "/MCAM_data/my_z_stack_acquisition/",
50    "metadata": {
51        "operator": "Mike Roscopy"
52    },
53    "parameters": {
54        "save_mode": "export",
55        "z_positions": [1E-3, 2E-3, 3E-3]
56    }
57}
58# Start the Z stack plate scan acquisition
59response = requests.post(
60    f"{url}/v1/mcam/{serial_number}/assay/z_stack",
61    json=assay_parameters,
62    # This assay can take more than 10 seconds if saving to slow storage
63    timeout=30,
64)
65
66# Set the MCAM device to sample loading state, which will extend the plate nest
67requests.post(
68    f"{url}/v1/mcam/{serial_number}/state",
69    json={"state": "sample_loading"},
70    timeout=10
71)
72
73# Disconnect from the MCAM device
74requests.delete(
75    f"{url}/v1/mcam/{serial_number}",
76    timeout=10
77)

Exporting Analysis Masks from Zebrafish Segmentation Metadata

 1"""
 2This script loads an MCAM dataset containing masks and exports
 3each mask as a separate compressed TIFF file.
 4
 5The output filenames are automatically
 6generated based on the dataset structure (wells or image grid positions).
 7
 8Masks are saved with zlib compression to reduce file size while maintaining
 9lossless quality and imageJ compatibility.
10"""
11from pathlib import Path
12
13import numpy as np
14import tifffile
15from tqdm import tqdm
16
17from owl import mcam_data
18from owl.analysis.util import parse_multi_overlapping_mask_array
19
20dataset_path = Path("/path/to/your/dataset/segmentation_results/segmentation_dataset.nc")
21output_path = Path("/path/to/output/directory/")
22
23dataset = mcam_data.load(dataset_path)
24
25assert 'segmentation_regions' in dataset, "No mask data found in the dataset."
26
27# masks are stored in a compressed format, make sure to
28# decompress with parse_multi_overlapping_mask_array here
29masks = parse_multi_overlapping_mask_array(dataset['segmentation_regions'])
30mask_names = dataset['segmentation_region_names']
31
32
33def get_mask_filename(dataset, image_idx):
34    has_wells = mcam_data.is_well_dataset(dataset)
35
36    if has_wells:
37        well_id = dataset.well_id[image_idx].data
38        return f'well_{well_id}_mask.tif'
39    else:
40        image_y, image_x = image_idx
41        return f'{image_y}_{image_x}_mask.tif'
42
43
44N_masks = masks.shape[0]
45array_shape = masks.shape[1:3]
46N_images = np.prod(array_shape)
47
48# make a progress bar combining the number of masks and number of images
49tqdm = tqdm(total=N_masks * N_images, desc="Exporting masks")
50
51for mask_idx in range(N_masks):
52    mask_name = str(mask_names[mask_idx].data)
53    mask_directory = output_path / mask_name
54    mask_directory.mkdir(parents=True, exist_ok=True)
55
56    for image_idx in np.ndindex(array_shape):
57        filename = get_mask_filename(dataset, image_idx)
58        mask_data = np.asarray(masks[(mask_idx,) + image_idx])
59        tifffile.imwrite(mask_directory / filename, mask_data, compression='zlib')
60
61        tqdm.update(1)

Exporting Analysis Masks from Vireo Segmentation Metadata

 1"""
 2This script loads an MCAM dataset containing masks and exports
 3each mask as a separate compressed TIFF file.
 4
 5The output filenames are automatically
 6generated based on the dataset structure (wells/fields or image grid positions).
 7
 8Masks are saved with zlib compression to reduce file size while maintaining
 9lossless quality and imageJ compatibility.
10"""
11
12from pathlib import Path
13
14import numpy as np
15import tifffile
16from tqdm import tqdm
17
18from owl import mcam_data
19
20dataset_path = Path("path/to/your/dataset/analysis_metadata.nc")
21output_path = Path("path/to/output/directory/")
22output_path.mkdir(parents=True, exist_ok=True)
23
24# The settings type of your masks can be found in the protocol file of your analysis
25# in the "__owl_settings_type__" key.
26settings_type = 'automatic_segmentation'
27
28dataset = mcam_data.load(dataset_path)
29
30assert f'{settings_type}_masks' in dataset, "No mask data found in the dataset."
31masks = dataset[f'{settings_type}_masks']
32
33
34def get_mask_filename(dataset, idx):
35    has_wells = mcam_data.is_well_dataset(dataset)
36    has_fields = mcam_data.using_field_id(dataset)
37
38    if has_wells and has_fields:
39        well_id = dataset.well_id[idx].data
40        field_id = int(dataset.field_id[idx].data)
41        return f'well_{well_id}_field_{field_id}_mask.tif'
42    elif has_wells:
43        well_id = dataset.well_id[idx].data
44        return f'well_{well_id}_mask.tif'
45    else:
46        image_y, image_x = idx
47        return f'{image_y}_{image_x}_mask.tif'
48
49
50image_tiles = list(np.ndindex(masks.shape[:2]))
51for idx in tqdm(image_tiles, desc="Exporting masks"):
52    filename = get_mask_filename(dataset, idx)
53    mask_data = np.asarray(masks[idx])
54    tifffile.imwrite(output_path / filename, mask_data, compression='zlib')

Creating a Timelapse Dataset from Individual Acquisitions

 1from pathlib import Path
 2
 3import click
 4import xarray as xr
 5from tqdm import tqdm
 6
 7"""
 8This script creates a timelapse metadata file from individual acquisition metadata files
 9in a specified directory. It renames acquisition directories to a standardized format
10and concatenates their metadata into a single file. Timepoints will be organized by
11acquisitions names in alphabetical order. Acquisition directory names will be over
12written to `acquisition_0`, `acquisition_1`, `acquisition_2`, and so on with the
13necessary padding so that files are organized in chronological order when sorted
14alphabetically.
15
16WARNING: This will rename directories
17and overwrite any existing metadata.nc file in the target directory, so please make a copy
18of the original files before running this script if needed. To avoid accidental overwriting,
19the script will prompt for user confirmation if directories need to be renamed.
20
21Usage:
22    python create_timelapse_from_single_acquisitions.py /path/to/timelapse_directory
23"""
24
25
26@click.command()
27@click.argument(
28    "timelapse_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path)
29)
30@click.help_option('-h', '--help')
31def create_timelapse_metadata(timelapse_dir):
32    """
33    Create a metadata file for a timelapse dataset.
34
35    Parameters
36    ----------
37    timelapse_dir : Path
38        Path to the timelapse directory.
39    """
40    # check if timelapse metadata file already exists
41    if (timelapse_dir / 'metadata.nc').exists():
42        raise ValueError(
43            f"The metadata file {timelapse_dir / 'metadata.nc'} would be overwritten. "
44            "Please remove it or rename it."
45        )
46
47    # Check if the directory contains metadata files
48    metadata_files = list(timelapse_dir.glob('*/metadata.nc'))
49    if not metadata_files:
50        raise ValueError(f"No metadata files found in the directory {timelapse_dir}.")
51    metadatas = []
52
53    acquisition_dirs = sorted([d.parent.stem for d in metadata_files])
54
55    p = len(str(len(acquisition_dirs)))
56
57    will_rename_files = False
58    for i, acquisition_dir in enumerate(acquisition_dirs):
59        new_acquisition_dir = timelapse_dir / f'acquisition_{i:0{p}d}'
60        if new_acquisition_dir != timelapse_dir / acquisition_dir and new_acquisition_dir.exists():
61            raise ValueError(
62                f"Directory {new_acquisition_dir} would be overwritten. "
63                "Please remove it or rename it."
64            )
65        if new_acquisition_dir != timelapse_dir / acquisition_dir:
66            will_rename_files = True
67
68    if will_rename_files:
69        # get user consent to continue
70        proceed = input("directories will be renamed. Do you want to continue? (y/n): ")
71        if proceed.lower() != 'y':
72            print("Exiting without making any changes.")
73            return
74
75    for i, acquisition_dir in enumerate(tqdm(acquisition_dirs)):
76        new_acquisition_dir = timelapse_dir / f'acquisition_{i:0{p}d}'
77        (timelapse_dir / acquisition_dir).rename(new_acquisition_dir)
78        metadata_filename = new_acquisition_dir / 'metadata.nc'
79        dataset = xr.open_dataset(metadata_filename).compute()
80        dataset.coords['timelapse_index'] = i
81        dataset['software_timestamp'] = dataset['software_timestamp'].expand_dims('timelapse_index')
82        metadatas.append(dataset)
83
84    final = xr.concat(
85        metadatas, dim='timelapse_index', coords='minimal', data_vars='minimal', compat='override'
86    )
87    final.attrs['images_dims'] = ['timelapse_index'] + final.attrs['images_dims']
88    final.attrs['image_export_tile_dims'] = [
89        'timelapse_index',
90    ] + final.attrs['image_export_tile_dims']
91    final.attrs['imagename_format'] = (
92        'acquisition_{timelapse_index:0' + str(p) + 'd}/' + final.attrs['imagename_format']
93    )
94    final.to_netcdf(timelapse_dir / 'metadata.nc', engine='h5netcdf')
95
96
97if __name__ == '__main__':
98    create_timelapse_metadata()