[python] Fix MultiscaleImage.read_spatial_region full region read #3128

Merged: 1 commit, Oct 4, 2024
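
For context, a minimal sketch of the user-facing call this PR fixes. The URI and level index are placeholders, and the result attributes mirror the new test below rather than any documented guarantee:

```python
# Illustrative sketch only: passing region=None to read_spatial_region should
# read the full extent of the requested level instead of failing in the region
# handling. The URI here is a placeholder.
import tiledbsoma as soma

with soma.MultiscaleImage.open("file:///tmp/example/multiscale") as image:
    result = image.read_spatial_region(level=0, region=None)  # full-region read
    data = result.data.to_numpy()
    inv_transform = result.coordinate_transform
```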
62 changes: 39 additions & 23 deletions apis/python/src/tiledbsoma/_spatial_util.py
```diff
@@ -113,46 +113,62 @@
 
 
 def process_image_region(
-    region: options.SpatialRegion,
+    region: Optional[options.SpatialRegion],
     transform: somacore.CoordinateTransform,
     channel_coords: options.DenseCoord,
     image_type: str,
-) -> Tuple[options.DenseNDCoords, options.SpatialRegion, somacore.CoordinateTransform]:
-    # Get the transformed region the user is selecting in the data space.
-    # Note: transform region verifies only 2D data. This function is hard-coded to
-    # make the same assumption.
-    data_region = transform_region(region, transform)
-
-    # Convert the region to a bounding box. Round values of bounding box to integer
-    # values. Include any partially intersected pixels.
-    (x_min, y_min, x_max, y_max) = shapely.bounds(data_region)
-    x_min = max(0, int(np.floor(x_min)))
-    y_min = max(0, int(np.floor(y_min)))
-    x_max = int(np.ceil(x_max))
-    y_max = int(np.ceil(y_max))
+) -> Tuple[
+    options.DenseNDCoords, Optional[options.SpatialRegion], somacore.CoordinateTransform
+]:
+
+    if region is None:
+        # Select the full region.
+        data_region: Optional[options.SpatialRegion] = None
+        x_coords: options.DenseCoord = None
+        y_coords: options.DenseCoord = None
+    else:
+        # Get the transformed region the user is selecting in the data space.
+        # Note: transform region verifies only 2D data. This function is hard-coded to
+        # make the same assumption.
+        data_region = transform_region(region, transform)
+
+        # Convert the region to a bounding box. Round values of bounding box to integer
+        # values. Include any partially intersected pixels.
+        (x_min, y_min, x_max, y_max) = shapely.bounds(data_region)
+        x_min = max(0, int(np.floor(x_min)))
+        y_min = max(0, int(np.floor(y_min)))
+        x_max = int(np.ceil(x_max))
+        y_max = int(np.ceil(y_max))
+        x_coords = slice(x_min, x_max)
+        y_coords = slice(y_min, y_max)
+
+        # Translate the transform if the region does not start at the origin.
+        if x_min != 0 or y_min != 0:
+            translate = somacore.AffineTransform(
+                transform.output_axes,
+                transform.output_axes,
+                np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]]),
+            )
+
+            transform = translate @ transform
 
     # Get the inverse translation from the data space to the original requested region.
-    if x_min != 0 or y_min != 0:
-        translate = somacore.AffineTransform(
-            transform.output_axes,
-            transform.output_axes,
-            np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]]),
-        )
-        transform = translate @ transform
     inv_transform = transform.inverse_transform()
 
     # Get the dense coordinates for querying the array storing the image.
     coords: options.DenseNDCoords = []
     for axis in image_type:
         if axis == "C":
             coords.append(channel_coords)  # type: ignore[attr-defined]
         if axis == "X":
-            coords.append(slice(x_min, x_max))  # type: ignore[attr-defined]
+            coords.append(x_coords)  # type: ignore[attr-defined]
         if axis == "Y":
-            coords.append(slice(y_min, y_max))  # type: ignore[attr-defined]
+            coords.append(y_coords)  # type: ignore[attr-defined]
         if axis == "Z":
             raise NotImplementedError(
                 "Spatial queries are currently only supported for 2D coordinates."
             )
 
     return (coords, data_region, inv_transform)
```

Codecov (codecov/patch) flagged the added lines 133, 137-143, 146-147, and 153 in this file as not covered by tests.
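
As a side note on the bounded-region branch in the diff above: when the bounding box does not start at the origin, a translation by (-x_min, -y_min) is composed with the level transform before it is inverted. A NumPy-only sketch of that arithmetic (not the somacore.AffineTransform API):

```python
# NumPy-only sketch of the origin-shift step in the bounded-region branch.
# Composing a translation by (-x_min, -y_min) with the level transform means the
# inverse maps pixel (0, 0) of the returned data back to the requested region.
import numpy as np

x_min, y_min = 3, 5
translate = np.array([[1, 0, -x_min],
                      [0, 1, -y_min],
                      [0, 0, 1]], dtype=float)
level_transform = np.eye(3)              # stand-in for the level's transform
shifted = translate @ level_transform    # analogue of `translate @ transform`
inv = np.linalg.inv(shifted)             # analogue of transform.inverse_transform()
print(inv @ np.array([0, 0, 1]))         # -> [3. 5. 1.]: pixel (0, 0) maps back to (3, 5)
```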
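And a standalone sketch (not the library code) of the axis loop at the end of process_image_region: with region=None both spatial coordinates stay None, which the diff treats as selecting the full region.

```python
# Standalone sketch of the coordinate-assembly loop. For a full-region read,
# x_coords and y_coords remain None, so no spatial axis is restricted.
from typing import List, Optional, Union

Coord = Optional[Union[int, slice]]


def build_coords(
    image_type: str, channel_coords: Coord, x_coords: Coord, y_coords: Coord
) -> List[Coord]:
    coords: List[Coord] = []
    for axis in image_type:
        if axis == "C":
            coords.append(channel_coords)
        elif axis == "X":
            coords.append(x_coords)
        elif axis == "Y":
            coords.append(y_coords)
        elif axis == "Z":
            raise NotImplementedError(
                "Spatial queries are currently only supported for 2D coordinates."
            )
    return coords


print(build_coords("CYX", None, None, None))             # [None, None, None] -> full region
print(build_coords("CYX", 0, slice(0, 8), slice(0, 9)))  # [0, slice(0, 9), slice(0, 8)]
```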
78 changes: 78 additions & 0 deletions apis/python/tests/test_multiscale_image.py
```diff
@@ -5,6 +5,7 @@
 import pytest
 
 import tiledbsoma as soma
+from tiledbsoma import ScaleTransform
 
 
 def test_multiscale_image_bad_create(tmp_path):
@@ -120,3 +121,80 @@ def test_multiscale_basic_no_channels(tmp_path):
     assert props.shape == shape
     assert props.height == shape[0]
     assert props.width == shape[1]
+
+
+class TestSimpleMultiscale2D:
+
+    @pytest.fixture(scope="class")
+    def image_uri(self, tmp_path_factory):
+        """Create a multiscale image and return the path."""
+        # Create the multiscale image.
+        baseuri = tmp_path_factory.mktemp("multiscale_image").as_uri()
+        image_uri = urljoin(baseuri, "default")
+        with soma.MultiscaleImage.create(
+            image_uri,
+            type=pa.uint8(),
+            reference_level_shape=(1, 9, 8),
+            axis_names=("c", "y", "x"),
+            axis_types=("channel", "height", "width"),
+        ) as image:
+            coords = (slice(None), slice(None), slice(None))
+            # Create levels.
+            l0 = image.add_new_level("level0", shape=(1, 9, 8))
+            l0.write(
+                coords,
+                pa.Tensor.from_numpy(np.arange(72, dtype=np.uint8).reshape(1, 9, 8)),
+            )
+
+            # Create medium sized downsample.
+            l1 = image.add_new_level("level1", shape=(1, 6, 4))
+            l1.write(
+                coords,
+                pa.Tensor.from_numpy(
+                    10 * np.arange(24, dtype=np.uint8).reshape(1, 6, 4)
+                ),
+            )
+
+            # Create very small downsample and write to it.
+            l2 = image.add_new_level("level2", shape=(1, 3, 2))
+            l2.write(
+                coords,
+                pa.Tensor.from_numpy(
+                    100 * np.arange(6, dtype=np.uint8).reshape(1, 3, 2)
+                ),
+            )
+        return image_uri
+
+    @pytest.mark.parametrize(
+        ("level", "region", "kwargs", "expected_data", "expected_transform"),
+        [
+            pytest.param(
+                2,
+                None,
+                {},
+                100 * np.arange(6, dtype=np.uint8).reshape(1, 3, 2),
+                ScaleTransform(("x", "y"), ("x", "y"), [4, 3]),
+                id="Level 2, full region, no transform",
+            ),
+        ],
+    )
+    def test_read_spatial_region(
+        self, image_uri, level, region, kwargs, expected_data, expected_transform
+    ):
+        with soma.MultiscaleImage.open(image_uri) as image:
+            result = image.read_spatial_region(level=level, region=region, **kwargs)
+            actual_data = result.data.to_numpy()
+
+            # Check data
+            np.testing.assert_array_equal(actual_data, expected_data)
+
+            # Check transform
+            actual_transform = result.coordinate_transform
+            assert actual_transform.input_axes == expected_transform.input_axes
+            assert actual_transform.output_axes == expected_transform.output_axes
+            assert isinstance(actual_transform, type(expected_transform))
+            np.testing.assert_array_almost_equal(
+                actual_transform.augmented_matrix,
+                expected_transform.augmented_matrix,
+                decimal=8,
+            )
```
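
A quick note on the expected transform in the parametrized case above: level 0 has shape (c=1, y=9, x=8) and level 2 has shape (c=1, y=3, x=2), so mapping level-2 pixels back to level-0 coordinates scales x by 8/2 = 4 and y by 9/3 = 3, which is why ScaleTransform(("x", "y"), ("x", "y"), [4, 3]) is expected. The sketch below is arithmetic only; the real transform object comes from tiledbsoma.

```python
# Arithmetic behind the expected ScaleTransform(("x", "y"), ("x", "y"), [4, 3]):
# per-axis scale factors from level 2 back to the level-0 reference shape.
level0_yx = (9, 8)   # reference level shape (y, x)
level2_yx = (3, 2)   # "level2" shape (y, x)
scale_x = level0_yx[1] / level2_yx[1]
scale_y = level0_yx[0] / level2_yx[0]
print([scale_x, scale_y])  # [4.0, 3.0]
```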