diff --git a/README.md b/README.md index ab3ad653..156ffb46 100644 --- a/README.md +++ b/README.md @@ -227,8 +227,11 @@ In the GIS world, rasters are used for representing continuous phenomena (e.g. e | [Aspect](xrspatial/aspect.py) | Computes downslope direction of each cell in degrees | ✅️ | ✅️ | ✅️ | ✅️ | | [Curvature](xrspatial/curvature.py) | Measures rate of slope change (concavity/convexity) at each cell | ✅️ |✅️ |✅️ | ✅️ | | [Hillshade](xrspatial/hillshade.py) | Simulates terrain illumination from a given sun angle and azimuth | ✅️ | ✅️ | ✅️ | ✅️ | +| [Roughness](xrspatial/terrain_metrics.py) | Computes local relief as max minus min elevation in a 3×3 window | ✅️ | ✅️ | ✅️ | ✅️ | | [Slope](xrspatial/slope.py) | Computes terrain gradient steepness at each cell in degrees | ✅️ | ✅️ | ✅️ | ✅️ | | [Terrain Generation](xrspatial/terrain.py) | Generates synthetic terrain elevation using fractal noise | ✅️ | ✅️ | ✅️ | ✅️ | +| [TPI](xrspatial/terrain_metrics.py) | Computes Topographic Position Index (center minus mean of neighbors) | ✅️ | ✅️ | ✅️ | ✅️ | +| [TRI](xrspatial/terrain_metrics.py) | Computes Terrain Ruggedness Index (local elevation variation) | ✅️ | ✅️ | ✅️ | ✅️ | | [Viewshed](xrspatial/viewshed.py) | Determines visible cells from a given observer point on terrain | ✅️ | ✅️ | ✅️ | ✅️ | | [Min Observable Height](xrspatial/experimental/min_observable_height.py) | Finds the minimum observer height needed to see each cell *(experimental)* | ✅️ | | | | | [Perlin Noise](xrspatial/perlin.py) | Generates smooth continuous random noise for procedural textures | ✅️ | ✅️ | ✅️ | ✅️ | diff --git a/benchmarks/benchmarks/terrain_metrics.py b/benchmarks/benchmarks/terrain_metrics.py new file mode 100644 index 00000000..47900c13 --- /dev/null +++ b/benchmarks/benchmarks/terrain_metrics.py @@ -0,0 +1,27 @@ +from xrspatial import tri, tpi, roughness + +from .common import Benchmarking + + +class TRI(Benchmarking): + def __init__(self): + super().__init__(func=tri) + + def 
time_tri(self, nx, type): + return self.time(nx, type) + + +class TPI(Benchmarking): + def __init__(self): + super().__init__(func=tpi) + + def time_tpi(self, nx, type): + return self.time(nx, type) + + +class Roughness(Benchmarking): + def __init__(self): + super().__init__(func=roughness) + + def time_roughness(self, nx, type): + return self.time(nx, type) diff --git a/xrspatial/__init__.py b/xrspatial/__init__.py index 70ff9e56..81c0ca2c 100644 --- a/xrspatial/__init__.py +++ b/xrspatial/__init__.py @@ -36,6 +36,9 @@ from xrspatial.surface_distance import surface_direction # noqa from xrspatial.surface_distance import surface_distance # noqa from xrspatial.terrain import generate_terrain # noqa +from xrspatial.terrain_metrics import roughness # noqa +from xrspatial.terrain_metrics import tpi # noqa +from xrspatial.terrain_metrics import tri # noqa from xrspatial.polygonize import polygonize # noqa from xrspatial.viewshed import viewshed # noqa from xrspatial.zonal import apply as zonal_apply # noqa diff --git a/xrspatial/accessor.py b/xrspatial/accessor.py index 7917efcf..d76c3f46 100644 --- a/xrspatial/accessor.py +++ b/xrspatial/accessor.py @@ -39,6 +39,20 @@ def curvature(self, **kwargs): from .curvature import curvature return curvature(self._obj, **kwargs) + # ---- Terrain Metrics ---- + + def tri(self, **kwargs): + from .terrain_metrics import tri + return tri(self._obj, **kwargs) + + def tpi(self, **kwargs): + from .terrain_metrics import tpi + return tpi(self._obj, **kwargs) + + def roughness(self, **kwargs): + from .terrain_metrics import roughness + return roughness(self._obj, **kwargs) + def viewshed(self, x, y, **kwargs): from .viewshed import viewshed return viewshed(self._obj, x, y, **kwargs) @@ -221,6 +235,20 @@ def curvature(self, **kwargs): from .curvature import curvature return curvature(self._obj, **kwargs) + # ---- Terrain Metrics ---- + + def tri(self, **kwargs): + from .terrain_metrics import tri + return tri(self._obj, **kwargs) + + def 
tpi(self, **kwargs): + from .terrain_metrics import tpi + return tpi(self._obj, **kwargs) + + def roughness(self, **kwargs): + from .terrain_metrics import roughness + return roughness(self._obj, **kwargs) + # ---- Classification ---- def natural_breaks(self, **kwargs): diff --git a/xrspatial/terrain_metrics.py b/xrspatial/terrain_metrics.py new file mode 100644 index 00000000..d4e55afe --- /dev/null +++ b/xrspatial/terrain_metrics.py @@ -0,0 +1,398 @@ +from __future__ import annotations + +from functools import partial +from math import isnan, sqrt +from typing import Optional + +try: + import cupy +except ImportError: + class cupy(object): + ndarray = False + +try: + import dask.array as da +except ImportError: + da = None + +import numpy as np +import xarray as xr +from numba import cuda + +from xrspatial.utils import ArrayTypeFunctionMapping +from xrspatial.utils import _boundary_to_dask +from xrspatial.utils import _pad_array +from xrspatial.utils import _validate_boundary +from xrspatial.utils import _validate_raster +from xrspatial.utils import cuda_args +from xrspatial.utils import ngjit +from xrspatial.dataset_support import supports_dataset + + +# --------------------------------------------------------------------------- +# CPU kernels +# --------------------------------------------------------------------------- + +@ngjit +def _tri_cpu(data): + out = np.empty(data.shape, np.float64) + out[:] = np.nan + rows, cols = data.shape + for y in range(1, rows - 1): + for x in range(1, cols - 1): + center = data[y, x] + total = 0.0 + for dy in range(-1, 2): + for dx in range(-1, 2): + if dy == 0 and dx == 0: + continue + diff = data[y + dy, x + dx] - center + total += diff * diff + out[y, x] = sqrt(total) + return out + + +@ngjit +def _tpi_cpu(data): + out = np.empty(data.shape, np.float64) + out[:] = np.nan + rows, cols = data.shape + for y in range(1, rows - 1): + for x in range(1, cols - 1): + center = data[y, x] + total = 0.0 + for dy in range(-1, 2): + for 
dx in range(-1, 2): + if dy == 0 and dx == 0: + continue + total += data[y + dy, x + dx] + out[y, x] = center - total / 8.0 + return out + + +@ngjit +def _roughness_cpu(data): + out = np.empty(data.shape, np.float64) + out[:] = np.nan + rows, cols = data.shape + for y in range(1, rows - 1): + for x in range(1, cols - 1): + has_nan = False + vmin = data[y - 1, x - 1] + vmax = vmin + for dy in range(-1, 2): + for dx in range(-1, 2): + v = data[y + dy, x + dx] + if isnan(v): + has_nan = True + break + if v < vmin: + vmin = v + if v > vmax: + vmax = v + if has_nan: + break + if not has_nan: + out[y, x] = vmax - vmin + return out + + +# --------------------------------------------------------------------------- +# GPU kernels +# --------------------------------------------------------------------------- + +@cuda.jit(device=True) +def _tri_gpu(arr): + center = arr[1, 1] + total = 0.0 + for dy in range(3): + for dx in range(3): + if dy == 1 and dx == 1: + continue + diff = arr[dy, dx] - center + total += diff * diff + return sqrt(total) + + +@cuda.jit(device=True) +def _tpi_gpu(arr): + center = arr[1, 1] + total = 0.0 + for dy in range(3): + for dx in range(3): + if dy == 1 and dx == 1: + continue + total += arr[dy, dx] + return center - total / 8.0 + + +@cuda.jit(device=True) +def _roughness_gpu(arr): + vmin = arr[0, 0] + vmax = arr[0, 0] + for dy in range(3): + for dx in range(3): + v = arr[dy, dx] + # NaN check: v != v is True only for NaN + if v != v: + return v + if v < vmin: + vmin = v + if v > vmax: + vmax = v + return vmax - vmin + + +@cuda.jit +def _tri_run_gpu(data, out): + i, j = cuda.grid(2) + if (i - 1 >= 0 and i + 1 <= out.shape[0] - 1 and + j - 1 >= 0 and j + 1 <= out.shape[1] - 1): + out[i, j] = _tri_gpu(data[i - 1:i + 2, j - 1:j + 2]) + + +@cuda.jit +def _tpi_run_gpu(data, out): + i, j = cuda.grid(2) + if (i - 1 >= 0 and i + 1 <= out.shape[0] - 1 and + j - 1 >= 0 and j + 1 <= out.shape[1] - 1): + out[i, j] = _tpi_gpu(data[i - 1:i + 2, j - 1:j + 2]) + + 
+@cuda.jit +def _roughness_run_gpu(data, out): + i, j = cuda.grid(2) + if (i - 1 >= 0 and i + 1 <= out.shape[0] - 1 and + j - 1 >= 0 and j + 1 <= out.shape[1] - 1): + out[i, j] = _roughness_gpu(data[i - 1:i + 2, j - 1:j + 2]) + + +# --------------------------------------------------------------------------- +# Backend wrapper factory +# --------------------------------------------------------------------------- + +def _make_backends(cpu_kernel, gpu_global_kernel): + """Return (run_numpy, run_cupy, run_dask_numpy, run_dask_cupy) tuple.""" + + def _run_numpy(data: np.ndarray, + boundary: str = 'nan') -> np.ndarray: + data = data.astype(np.float64) + if boundary == 'nan': + return cpu_kernel(data) + padded = _pad_array(data, 1, boundary) + result = cpu_kernel(padded) + return result[1:-1, 1:-1] + + def _run_dask_numpy(data: da.Array, + boundary: str = 'nan') -> da.Array: + data = data.astype(np.float64) + _func = cpu_kernel + out = data.map_overlap(_func, + depth=(1, 1), + boundary=_boundary_to_dask(boundary), + meta=np.array(())) + return out + + def _run_cupy(data: cupy.ndarray, + boundary: str = 'nan') -> cupy.ndarray: + if boundary != 'nan': + padded = _pad_array(data, 1, boundary) + result = _run_cupy(padded) + return result[1:-1, 1:-1] + + data = data.astype(cupy.float64) + griddim, blockdim = cuda_args(data.shape) + out = cupy.empty(data.shape, dtype='f8') + out[:] = cupy.nan + gpu_global_kernel[griddim, blockdim](data, out) + return out + + def _run_dask_cupy(data: da.Array, + boundary: str = 'nan') -> da.Array: + data = data.astype(cupy.float64) + _func = partial(_run_cupy, boundary=boundary) + out = data.map_overlap(_func, + depth=(1, 1), + boundary=_boundary_to_dask(boundary, is_cupy=True), + meta=cupy.array(())) + return out + + return _run_numpy, _run_cupy, _run_dask_numpy, _run_dask_cupy + + +_tri_backends = _make_backends(_tri_cpu, _tri_run_gpu) +_tpi_backends = _make_backends(_tpi_cpu, _tpi_run_gpu) +_roughness_backends = _make_backends(_roughness_cpu, 
_roughness_run_gpu) + + +# --------------------------------------------------------------------------- +# Public API +# --------------------------------------------------------------------------- + +@supports_dataset +def tri(agg: xr.DataArray, + name: Optional[str] = 'tri', + boundary: str = 'nan') -> xr.DataArray: + """Compute the Terrain Ruggedness Index (TRI) for each cell. + + TRI quantifies local elevation variation as the square root of + the sum of squared differences between the center cell and its + 8 neighbors in a 3x3 window (Riley et al. 1999). + + Parameters + ---------- + agg : xarray.DataArray or xr.Dataset + 2D NumPy, CuPy, NumPy-backed Dask, or CuPy-backed Dask + xarray DataArray of elevation values. + If a Dataset is passed, the operation is applied to each + data variable independently. + name : str, default='tri' + Name of output DataArray. + boundary : str, default='nan' + How to handle edges where the kernel extends beyond the raster. + ``'nan'`` - fill missing neighbors with NaN (default). + ``'nearest'`` - repeat edge values. + ``'reflect'`` - mirror at boundary. + ``'wrap'`` - periodic / toroidal. + + Returns + ------- + xarray.DataArray or xr.Dataset + 2D array of TRI values with the same shape, coords, dims, + and attrs as the input. + + References + ---------- + Riley, S.J., DeGloria, S.D. and Elliot, R. (1999). A Terrain + Ruggedness Index that Quantifies Topographic Heterogeneity. + Intermountain Journal of Sciences, 5(1-4), 23-27. 
+ """ + _validate_raster(agg, func_name='tri', name='agg') + _validate_boundary(boundary) + + run_np, run_cupy, run_dask_np, run_dask_cupy = _tri_backends + mapper = ArrayTypeFunctionMapping( + numpy_func=run_np, + cupy_func=run_cupy, + dask_func=run_dask_np, + dask_cupy_func=run_dask_cupy, + ) + out = mapper(agg)(agg.data, boundary) + return xr.DataArray(out, + name=name, + coords=agg.coords, + dims=agg.dims, + attrs=agg.attrs) + + +@supports_dataset +def tpi(agg: xr.DataArray, + name: Optional[str] = 'tpi', + boundary: str = 'nan') -> xr.DataArray: + """Compute the Topographic Position Index (TPI) for each cell. + + TPI is the difference between the elevation of the center cell + and the mean elevation of its 8 neighbors in a 3x3 window + (Weiss 2001). Positive values indicate ridgetops, negative + values indicate valleys, and near-zero values indicate flat + areas or mid-slope positions. + + Parameters + ---------- + agg : xarray.DataArray or xr.Dataset + 2D NumPy, CuPy, NumPy-backed Dask, or CuPy-backed Dask + xarray DataArray of elevation values. + If a Dataset is passed, the operation is applied to each + data variable independently. + name : str, default='tpi' + Name of output DataArray. + boundary : str, default='nan' + How to handle edges where the kernel extends beyond the raster. + ``'nan'`` - fill missing neighbours with NaN (default). + ``'nearest'`` - repeat edge values. + ``'reflect'`` - mirror at boundary. + ``'wrap'`` - periodic / toroidal. + + Returns + ------- + xarray.DataArray or xr.Dataset + 2D array of TPI values with the same shape, coords, dims, + and attrs as the input. + + References + ---------- + Weiss, A. (2001). Topographic Position and Landforms Analysis. + Poster presentation, ESRI User Conference, San Diego, CA. 
+ """ + _validate_raster(agg, func_name='tpi', name='agg') + _validate_boundary(boundary) + + run_np, run_cupy, run_dask_np, run_dask_cupy = _tpi_backends + mapper = ArrayTypeFunctionMapping( + numpy_func=run_np, + cupy_func=run_cupy, + dask_func=run_dask_np, + dask_cupy_func=run_dask_cupy, + ) + out = mapper(agg)(agg.data, boundary) + return xr.DataArray(out, + name=name, + coords=agg.coords, + dims=agg.dims, + attrs=agg.attrs) + + +@supports_dataset +def roughness(agg: xr.DataArray, + name: Optional[str] = 'roughness', + boundary: str = 'nan') -> xr.DataArray: + """Compute the roughness for each cell. + + Roughness is the difference between the maximum and minimum + elevation values in a 3x3 window (including the center cell). + This is the same definition used by GDAL's ``gdaldem roughness``. + + Parameters + ---------- + agg : xarray.DataArray or xr.Dataset + 2D NumPy, CuPy, NumPy-backed Dask, or CuPy-backed Dask + xarray DataArray of elevation values. + If a Dataset is passed, the operation is applied to each + data variable independently. + name : str, default='roughness' + Name of output DataArray. + boundary : str, default='nan' + How to handle edges where the kernel extends beyond the raster. + ``'nan'`` - fill missing neighbours with NaN (default). + ``'nearest'`` - repeat edge values. + ``'reflect'`` - mirror at boundary. + ``'wrap'`` - periodic / toroidal. + + Returns + ------- + xarray.DataArray or xr.Dataset + 2D array of roughness values with the same shape, coords, dims, + and attrs as the input. + + References + ---------- + GDAL/OGR contributors (2024). GDAL/OGR Geospatial Data Abstraction + software Library. Open Source Geospatial Foundation. 
+ https://gdal.org/programs/gdaldem.html + """ + _validate_raster(agg, func_name='roughness', name='agg') + _validate_boundary(boundary) + + run_np, run_cupy, run_dask_np, run_dask_cupy = _roughness_backends + mapper = ArrayTypeFunctionMapping( + numpy_func=run_np, + cupy_func=run_cupy, + dask_func=run_dask_np, + dask_cupy_func=run_dask_cupy, + ) + out = mapper(agg)(agg.data, boundary) + return xr.DataArray(out, + name=name, + coords=agg.coords, + dims=agg.dims, + attrs=agg.attrs) diff --git a/xrspatial/tests/test_terrain_metrics.py b/xrspatial/tests/test_terrain_metrics.py new file mode 100644 index 00000000..4801c005 --- /dev/null +++ b/xrspatial/tests/test_terrain_metrics.py @@ -0,0 +1,350 @@ +import numpy as np +import pytest +import xarray as xr + +from xrspatial import tri, tpi, roughness +from xrspatial.tests.general_checks import (assert_boundary_mode_correctness, + assert_numpy_equals_cupy, + assert_numpy_equals_dask_cupy, + assert_numpy_equals_dask_numpy, + create_test_raster, + cuda_and_cupy_available, + dask_array_available, + general_output_checks) + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture +def flat_surface(): + """Constant elevation — all three metrics should be 0 in the interior.""" + data = np.full((6, 8), 42.0, dtype=np.float64) + return data + + +@pytest.fixture +def known_surface(): + """Hand-crafted 5×5 grid with pre-computed expected outputs. 
+ + Grid: + 1 2 3 4 5 + 6 7 8 9 10 + 11 12 13 14 15 + 16 17 18 19 20 + 21 22 23 24 25 + """ + data = np.arange(1, 26, dtype=np.float64).reshape(5, 5) + return data + + +# --------------------------------------------------------------------------- +# Hand-computed expected values for the known_surface +# --------------------------------------------------------------------------- + +def _expected_tri_known(): + """Compute expected TRI for interior cells of the 5×5 grid.""" + data = np.arange(1, 26, dtype=np.float64).reshape(5, 5) + out = np.full((5, 5), np.nan) + for y in range(1, 4): + for x in range(1, 4): + center = data[y, x] + total = 0.0 + for dy in range(-1, 2): + for dx in range(-1, 2): + if dy == 0 and dx == 0: + continue + diff = data[y + dy, x + dx] - center + total += diff * diff + out[y, x] = np.sqrt(total) + return out + + +def _expected_tpi_known(): + """Compute expected TPI for interior cells of the 5×5 grid.""" + data = np.arange(1, 26, dtype=np.float64).reshape(5, 5) + out = np.full((5, 5), np.nan) + for y in range(1, 4): + for x in range(1, 4): + center = data[y, x] + total = 0.0 + count = 0 + for dy in range(-1, 2): + for dx in range(-1, 2): + if dy == 0 and dx == 0: + continue + total += data[y + dy, x + dx] + count += 1 + out[y, x] = center - total / count + return out + + +def _expected_roughness_known(): + """Compute expected roughness for interior cells of the 5×5 grid.""" + data = np.arange(1, 26, dtype=np.float64).reshape(5, 5) + out = np.full((5, 5), np.nan) + for y in range(1, 4): + for x in range(1, 4): + vmin = np.inf + vmax = -np.inf + for dy in range(-1, 2): + for dx in range(-1, 2): + v = data[y + dy, x + dx] + if v < vmin: + vmin = v + if v > vmax: + vmax = v + out[y, x] = vmax - vmin + return out + + +# --------------------------------------------------------------------------- +# Flat surface tests +# --------------------------------------------------------------------------- + +def test_tri_flat(flat_surface): + agg = 
create_test_raster(flat_surface) + result = tri(agg) + interior = result.data[1:-1, 1:-1] + np.testing.assert_allclose(interior, 0.0, atol=1e-10) + + +def test_tpi_flat(flat_surface): + agg = create_test_raster(flat_surface) + result = tpi(agg) + interior = result.data[1:-1, 1:-1] + np.testing.assert_allclose(interior, 0.0, atol=1e-10) + + +def test_roughness_flat(flat_surface): + agg = create_test_raster(flat_surface) + result = roughness(agg) + interior = result.data[1:-1, 1:-1] + np.testing.assert_allclose(interior, 0.0, atol=1e-10) + + +# --------------------------------------------------------------------------- +# Known-values tests +# --------------------------------------------------------------------------- + +def test_tri_known(known_surface): + agg = create_test_raster(known_surface) + result = tri(agg) + expected = _expected_tri_known() + general_output_checks(agg, result, expected) + + +def test_tpi_known(known_surface): + agg = create_test_raster(known_surface) + result = tpi(agg) + expected = _expected_tpi_known() + general_output_checks(agg, result, expected) + + +def test_roughness_known(known_surface): + agg = create_test_raster(known_surface) + result = roughness(agg) + expected = _expected_roughness_known() + general_output_checks(agg, result, expected) + + +# --------------------------------------------------------------------------- +# NaN handling tests +# --------------------------------------------------------------------------- + +def test_nan_propagation(): + """NaN in any neighbor should propagate to the output (GDAL behavior).""" + data = np.array([ + [1.0, 2.0, 3.0, 4.0], + [5.0, np.nan, 7.0, 8.0], + [9.0, 10.0, 11.0, 12.0], + [13.0, 14.0, 15.0, 16.0], + ]) + agg = create_test_raster(data) + + # Cell (1,2)=7.0 has NaN neighbor at (1,1) -> all three metrics NaN + tri_result = tri(agg) + assert np.isnan(tri_result.data[1, 2]) + + tpi_result = tpi(agg) + assert np.isnan(tpi_result.data[1, 2]) + + roughness_result = roughness(agg) + assert 
np.isnan(roughness_result.data[1, 2]) + + # Cell (2,2)=11.0 also has NaN neighbor at (1,1) -> NaN + assert np.isnan(tri_result.data[2, 2]) + + # Cell (2,1)=10.0 has NaN neighbor at (1,1) -> NaN + assert np.isnan(tpi_result.data[2, 1]) + + # Cell with no NaN neighbors: (2,2) has NaN neighbor so skip; + # Check (1,1) which is NaN center -> NaN output + assert np.isnan(tri_result.data[1, 1]) + assert np.isnan(tpi_result.data[1, 1]) + + +def test_no_nan_in_window(): + """Interior cells with no NaN neighbors should produce valid results.""" + data = np.arange(1, 26, dtype=np.float64).reshape(5, 5) + agg = create_test_raster(data) + + # Interior cell (2,2) = 13, all neighbors are valid + tri_result = tri(agg) + assert np.isfinite(tri_result.data[2, 2]) + + tpi_result = tpi(agg) + assert np.isfinite(tpi_result.data[2, 2]) + + +# --------------------------------------------------------------------------- +# Edge NaN tests +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_nan_edges(func): + data = np.random.default_rng(42).random((6, 8)).astype(np.float64) * 100 + agg = create_test_raster(data) + result = func(agg) + # Edges should be NaN with default boundary='nan' + np.testing.assert_array_equal(result.data[0, :], np.nan) + np.testing.assert_array_equal(result.data[-1, :], np.nan) + np.testing.assert_array_equal(result.data[:, 0], np.nan) + np.testing.assert_array_equal(result.data[:, -1], np.nan) + + +# --------------------------------------------------------------------------- +# Cross-backend tests +# --------------------------------------------------------------------------- + +@dask_array_available +@pytest.mark.parametrize("size", [(4, 6), (10, 15)]) +@pytest.mark.parametrize( + "dtype", [np.int32, np.int64, np.float32, np.float64]) +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_numpy_equals_dask(random_data, func): + numpy_agg = 
create_test_raster(random_data, backend='numpy') + dask_agg = create_test_raster(random_data, backend='dask') + assert_numpy_equals_dask_numpy(numpy_agg, dask_agg, func) + + +@cuda_and_cupy_available +@pytest.mark.parametrize("size", [(4, 6), (10, 15)]) +@pytest.mark.parametrize( + "dtype", [np.int32, np.int64, np.float32, np.float64]) +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_numpy_equals_cupy(random_data, func): + numpy_agg = create_test_raster(random_data, backend='numpy') + cupy_agg = create_test_raster(random_data, backend='cupy') + assert_numpy_equals_cupy(numpy_agg, cupy_agg, func) + + +@dask_array_available +@cuda_and_cupy_available +@pytest.mark.parametrize("size", [(4, 6), (10, 15)]) +@pytest.mark.parametrize( + "dtype", [np.int32, np.int64, np.float32, np.float64]) +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_numpy_equals_dask_cupy(random_data, func): + numpy_agg = create_test_raster(random_data, backend='numpy') + dask_cupy_agg = create_test_raster(random_data, backend='dask+cupy') + assert_numpy_equals_dask_cupy(numpy_agg, dask_cupy_agg, func, + atol=1e-6, rtol=1e-6) + + +# --------------------------------------------------------------------------- +# Boundary mode tests +# --------------------------------------------------------------------------- + +@dask_array_available +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_boundary_modes(func): + data = np.random.default_rng(42).random((8, 10)).astype(np.float64) * 100 + numpy_agg = create_test_raster(data) + dask_agg = create_test_raster(data, backend='dask+numpy') + assert_boundary_mode_correctness(numpy_agg, dask_agg, func) + + +@dask_array_available +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +@pytest.mark.parametrize("boundary", ['nan', 'nearest', 'reflect', 'wrap']) +@pytest.mark.parametrize("size,chunks", [ + ((6, 8), (3, 4)), + ((7, 9), (3, 3)), + ((10, 15), (5, 5)), + ((10, 15), (10, 15)), + ((5, 5), (2, 2)), +]) +def 
test_boundary_numpy_equals_dask(func, boundary, size, chunks): + rng = np.random.default_rng(42) + data = rng.random(size).astype(np.float64) * 100 + numpy_agg = create_test_raster(data, backend='numpy') + dask_agg = create_test_raster(data, backend='dask+numpy', chunks=chunks) + np_result = func(numpy_agg, boundary=boundary) + da_result = func(dask_agg, boundary=boundary) + np.testing.assert_allclose( + np_result.data, da_result.data.compute(), equal_nan=True, rtol=1e-5) + + +@dask_array_available +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +@pytest.mark.parametrize("boundary", ['nearest', 'reflect', 'wrap']) +def test_boundary_no_nan_flat(func, boundary): + """Flat surface with non-nan boundary should produce all zeros, no NaN.""" + data = np.full((8, 10), 50.0, dtype=np.float64) + numpy_agg = create_test_raster(data, backend='numpy') + dask_agg = create_test_raster(data, backend='dask+numpy', chunks=(4, 5)) + np_result = func(numpy_agg, boundary=boundary) + da_result = func(dask_agg, boundary=boundary) + np.testing.assert_allclose(np_result.data, 0.0, atol=1e-10) + np.testing.assert_allclose(np_result.data, da_result.data.compute(), + equal_nan=True, rtol=1e-6) + + +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +def test_boundary_invalid(func): + data = np.ones((4, 5), dtype=np.float64) + agg = create_test_raster(data) + with pytest.raises(ValueError, match="boundary must be one of"): + func(agg, boundary='invalid') + + +# --------------------------------------------------------------------------- +# Dtype preservation tests +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("func", [tri, tpi, roughness]) +@pytest.mark.parametrize( + "dtype", [np.int32, np.int64, np.uint32, np.uint64, np.float32, np.float64]) +def test_dtype_acceptance(func, dtype): + data = np.arange(20, dtype=dtype).reshape(4, 5) + agg = create_test_raster(data) + result = func(agg) + assert result.shape == 
agg.shape + assert result.dims == agg.dims + + +# --------------------------------------------------------------------------- +# Dataset support tests +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("func,expected_name", [ + (tri, 'tri'), + (tpi, 'tpi'), + (roughness, 'roughness'), +]) +def test_dataset_support(func, expected_name): + data = np.random.default_rng(42).random((6, 8)).astype(np.float64) * 100 + da1 = xr.DataArray(data, dims=['y', 'x'], attrs={'res': (0.5, 0.5)}) + da1['y'] = np.linspace(2.5, 0, 6) + da1['x'] = np.linspace(0, 3.5, 8) + ds = xr.Dataset({'elev1': da1, 'elev2': da1 * 2}) + result = func(ds) + assert isinstance(result, xr.Dataset) + assert set(result.data_vars) == {'elev1', 'elev2'} + # Individual results should match calling on a DataArray directly + for var in result.data_vars: + expected = func(ds[var], name=var) + np.testing.assert_allclose( + result[var].data, expected.data, equal_nan=True)