-
Notifications
You must be signed in to change notification settings - Fork 86
Expand file tree
/
Copy pathzonal.py
More file actions
2503 lines (2078 loc) · 78.9 KB
/
zonal.py
File metadata and controls
2503 lines (2078 loc) · 78.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from __future__ import annotations
# standard library
import copy
from math import sqrt
from typing import Callable, Dict, List, Optional, Union
# 3rd-party
try:
import dask
import dask.array as da
except ImportError:
dask = None
da = None
try:
import dask.dataframe as dd
except ImportError:
dd = None
try:
from dask import delayed
except ImportError:
delayed = lambda x: None # noqa
import numpy as np
import pandas as pd
import xarray as xr
from xarray import DataArray
try:
import cupy
except ImportError:
class cupy(object):
ndarray = False
# local modules
from xrspatial.utils import (
ArrayTypeFunctionMapping, _validate_raster, cuda_args, has_cuda_and_cupy,
is_cupy_array, is_dask_cupy,
ngjit, not_implemented_func, validate_arrays,
)
from xrspatial.utils import has_dask_array
# Sentinel column key used by the crosstab helpers to accumulate the total
# number of valid (finite, non-nodata) cells per zone.
TOTAL_COUNT = '_total_count'
def _maybe_rasterize_zones(zones, values, column=None, rasterize_kw=None):
    """Convert vector *zones* input into a raster aligned with *values*.

    Supported vector forms:
      - ``GeoDataFrame`` (requires *column* naming the zone-ID field)
      - list of ``(geometry, value)`` pairs

    A DataArray passes straight through; any other unrecognized input is
    also returned unchanged so downstream validation can reject it.
    Returns a 2-D ``xr.DataArray`` of zone IDs aligned to *values* when
    rasterization happens.
    """
    if isinstance(zones, xr.DataArray):
        return zones

    # Detect a non-empty list/tuple of (geometry, value) pairs.
    pairs_input = (
        isinstance(zones, (list, tuple))
        and len(zones) > 0
        and isinstance(zones[0], (list, tuple))
        and len(zones[0]) == 2
    )

    # Detect a GeoDataFrame; geopandas is an optional dependency.
    gdf_input = False
    try:
        import geopandas as gpd
        gdf_input = isinstance(zones, gpd.GeoDataFrame)
    except ImportError:
        pass

    if not (pairs_input or gdf_input):
        return zones

    from .rasterize import rasterize

    # Choose the rasterize template ("like") from values: the first 2D
    # y/x variable of a Dataset, or the DataArray itself.
    if isinstance(values, xr.Dataset):
        like = None
        for var in values.data_vars:
            candidate = values[var]
            if candidate.ndim >= 2 and 'y' in candidate.dims and 'x' in candidate.dims:
                like = candidate
                break
        if like is None:
            raise ValueError(
                "values Dataset has no 2D variable with 'y' and 'x' "
                "dimensions to use as rasterize template"
            )
    elif isinstance(values, xr.DataArray):
        if values.ndim < 2:
            raise ValueError(
                "values must be at least 2D to use as rasterize template"
            )
        like = values
    else:
        raise TypeError(
            f"values must be an xr.DataArray or xr.Dataset, got {type(values)}"
        )

    rasterize_args = dict(rasterize_kw or {})
    rasterize_args['like'] = like
    if gdf_input:
        if column is None:
            raise ValueError(
                "column is required when zones is a GeoDataFrame. "
                "Specify which column contains zone IDs."
            )
        rasterize_args['column'] = column
    elif pairs_input:
        if column is not None:
            raise ValueError(
                "column should not be set when zones is a list of "
                "(geometry, value) pairs"
            )
    return rasterize(zones, **rasterize_args)
def _unique_finite_zones(arr):
    """Return the sorted, finite unique values of *arr*.

    Dask arrays are reduced with ``da.unique`` (a per-chunk reduction), so
    the full array never has to be materialised in RAM at once.
    """
    if da is not None and isinstance(arr, da.Array):
        computed = da.unique(arr).compute()
        return computed[np.isfinite(computed)]
    finite_only = arr[np.isfinite(arr)]
    return np.unique(finite_only)
def _unique_finite_cats(arr, nodata_values):
    """Sorted unique values of *arr*, excluding NaN/Inf and *nodata_values*.

    Dask-safe: reduction goes through ``da.unique`` so the array is never
    fully materialised on the client.
    """
    if da is not None and isinstance(arr, da.Array):
        computed = da.unique(arr).compute()
        keep = np.isfinite(computed)
        if nodata_values is not None:
            keep &= (computed != nodata_values)
        return computed[keep]
    keep = np.isfinite(arr)
    if nodata_values is not None:
        keep &= (arr != nodata_values)
    return np.unique(arr[keep])
def _stats_count(data):
    """Number of unmasked elements in *data* (numpy, cupy, or dask)."""
    if isinstance(data, np.ndarray):
        # numpy: np.ma.count respects the mask on masked arrays and
        # returns the plain size otherwise
        return np.ma.count(data)
    if isinstance(data, cupy.ndarray):
        # cupy: no masked-array support, so every element counts
        return np.prod(data.shape)
    # dask: total size minus the number of masked elements
    return data.size - da.ma.getmaskarray(data).sum()
def _stats_majority(data):
    """Most frequent value in *data*; on a tie the smallest value wins
    (unique values are sorted and argmax returns the first maximum)."""
    if isinstance(data, np.ndarray):
        # numpy case
        vals, freqs = np.unique(data, return_counts=True)
        return vals[np.argmax(freqs)]
    if isinstance(data, cupy.ndarray):
        # cupy case
        vals, freqs = cupy.unique(data, return_counts=True)
        return vals[cupy.argmax(freqs)]
    # dask case
    vals, freqs = da.unique(data, return_counts=True)
    return vals[da.argmax(freqs)]
# Registry of the built-in stats for the numpy/cupy backends.  Each value
# receives a 1-D array of one zone's valid values and returns a scalar.
_DEFAULT_STATS = dict(
    mean=lambda z: z.mean(),
    max=lambda z: z.max(),
    min=lambda z: z.min(),
    sum=lambda z: z.sum(),
    std=lambda z: z.std(),
    var=lambda z: z.var(),
    count=lambda z: _stats_count(z),
    majority=lambda z: _stats_majority(z),
)
# Per-chunk partial statistics for the dask backend: computed independently
# on every block, then merged across blocks via _DASK_STATS below.
_DASK_BLOCK_STATS = dict(
    max=lambda z: z.max(),
    min=lambda z: z.min(),
    sum=lambda z: z.sum(),
    count=lambda z: _stats_count(z),
    sum_squares=lambda z: ((z - z.mean()) ** 2).sum()  # block-level M2
)
def _nanreduce_preserve_allnan(blocks, func):
    """Apply *func* across axis 0, keeping NaN where every block is NaN.

    ``np.nansum`` maps an all-NaN column to 0; here such columns must stay
    NaN so that zones without any valid value propagate NaN, matching the
    numpy backend's behaviour.
    """
    import warnings

    with warnings.catch_warnings():
        # nan-reductions over all-NaN slices emit RuntimeWarnings
        warnings.simplefilter('ignore', RuntimeWarning)
        reduced = func(blocks, axis=0)
    no_valid = np.all(np.isnan(blocks), axis=0)
    reduced[no_valid] = np.nan
    return reduced
# Cross-block merge functions: each receives the stacked per-block results
# with shape (n_blocks, n_zones) and reduces along axis 0, preserving NaN
# for zones that had no valid data in any block.
_DASK_STATS = dict(
    max=lambda blocks: _nanreduce_preserve_allnan(blocks, np.nanmax),
    min=lambda blocks: _nanreduce_preserve_allnan(blocks, np.nanmin),
    sum=lambda blocks: _nanreduce_preserve_allnan(blocks, np.nansum),
    count=lambda blocks: _nanreduce_preserve_allnan(blocks, np.nansum),
    sum_squares=lambda blocks: _nanreduce_preserve_allnan(blocks, np.nansum),
)
def _dask_mean(sums, counts):  # noqa
    """Per-zone mean from the merged per-zone sums and counts."""
    return sums / counts
def _parallel_variance(block_counts, block_sums, block_m2s):
    """Population variance via Chan-Golub-LeVeque parallel merge.

    Each input is (n_blocks, n_zones). ``block_m2s`` contains
    per-block M2 values (sum of squared deviations from the block mean),
    NOT raw sum-of-squares. Returns (n_zones,) population variance,
    with NaN for zones that have no valid values in any block.
    """
    n_blocks = block_counts.shape[0]
    n_zones = block_counts.shape[1]
    # Running accumulators across blocks: per-zone count, mean, and M2.
    n_acc = np.zeros(n_zones, dtype=np.float64)
    mean_acc = np.zeros(n_zones, dtype=np.float64)
    m2_acc = np.zeros(n_zones, dtype=np.float64)
    for i in range(n_blocks):
        nc = np.asarray(block_counts[i], dtype=np.float64)
        sc = np.asarray(block_sums[i], dtype=np.float64)
        m2_b = np.asarray(block_m2s[i], dtype=np.float64)
        # Zones with no valid data in this block must not disturb the
        # accumulators; every update below is masked with `has_data`.
        has_data = np.isfinite(nc) & (nc > 0)
        nc_safe = np.where(has_data, nc, 1.0) # avoid /0
        with np.errstate(invalid='ignore', divide='ignore'):
            mean_b = sc / nc_safe
        nc = np.where(has_data, nc, 0.0)
        n_ab = n_acc + nc
        delta = mean_b - mean_acc
        with np.errstate(invalid='ignore', divide='ignore'):
            n_ab_safe = np.where(n_ab > 0, n_ab, 1.0)
            # CGL merge step: M2_ab = M2_a + M2_b + delta^2 * n_a*n_b/n_ab
            correction = delta ** 2 * n_acc * nc / n_ab_safe
            new_mean = mean_acc + delta * nc / n_ab_safe
        m2_acc = np.where(has_data, m2_acc + m2_b + correction, m2_acc)
        mean_acc = np.where(has_data, new_mean, mean_acc)
        n_acc = np.where(has_data, n_ab, n_acc)
    # Population variance = M2 / n; zones never seen stay NaN.
    with np.errstate(invalid='ignore', divide='ignore'):
        var = np.where(n_acc > 0, m2_acc / n_acc, np.nan)
    return var
@ngjit
def _strides(flatten_zones, unique_zones):
    """For each zone in *unique_zones*, return the exclusive end offset of
    that zone's run inside the (already sorted) *flatten_zones* array.

    Assumes both inputs are sorted ascending.  A zone absent from
    *flatten_zones* repeats the previous offset (an empty run).
    """
    num_elements = flatten_zones.shape[0]
    num_zones = len(unique_zones)
    strides = np.zeros(len(unique_zones), dtype=np.int32)
    count = 0
    for i in range(num_zones):
        # advance the cursor through this zone's contiguous run
        while (count < num_elements) and (
                flatten_zones[count] == unique_zones[i]):
            count += 1
        strides[i] = count
    return strides
def _sort_and_stride(zones, values, unique_zones):
    """Gather *values* into zone-sorted order and compute per-zone offsets.

    Parameters
    ----------
    zones : np.ndarray
        Zone-id raster (2D).
    values : np.ndarray
        Value raster, 2D or 3D with a leading layer dimension and the same
        spatial shape as *zones*.
    unique_zones : np.ndarray
        Sorted finite zone ids present in *zones*.

    Returns
    -------
    sorted_indices : np.ndarray
        Argsort order of the flattened zones raster.
    values_by_zones : np.ndarray
        Flattened values gathered into zone-sorted order; shape
        (n_layers, n_pixels) when *values* is 3D, else (n_pixels,).
    zone_breaks : np.ndarray
        Exclusive end offset of each zone's run (see ``_strides``).
    """
    flatten_zones = zones.ravel()
    sorted_indices = np.argsort(flatten_zones)
    sorted_zones = flatten_zones[sorted_indices]
    if values.ndim == 3:
        n_layers, height, width = values.shape
        # Fancy indexing returns a fresh array, so `values` is never
        # mutated — no need for the previous deepcopy + per-layer loop.
        values_by_zones = values.reshape(
            n_layers, height * width)[:, sorted_indices]
    else:
        values_by_zones = values.ravel()[sorted_indices]
    # exclude nans from calculation
    # flatten_zones is already sorted, NaN elements (if any) are at the end
    # of the array, removing them will not affect data before them
    sorted_zones = sorted_zones[np.isfinite(sorted_zones)]
    zone_breaks = _strides(sorted_zones, unique_zones)
    return sorted_indices, values_by_zones, zone_breaks
def _calc_stats(
    values_by_zones: np.array,
    zone_breaks: np.array,
    unique_zones: np.array,
    zone_ids: np.array,
    func: Callable,
    nodata_values: Union[int, float] = None,
):
    """Apply *func* to every requested zone's valid values.

    Zones not listed in *zone_ids*, and zones whose run contains no
    finite, non-nodata values, yield NaN in the returned array (one entry
    per zone in *unique_zones*).
    """
    results = np.full(unique_zones.shape, np.nan)
    start = 0
    for i, zone in enumerate(unique_zones):
        end = zone_breaks[i]
        if zone in zone_ids:
            zone_values = values_by_zones[start:end]
            # keep only finite values that are not the nodata marker
            valid = np.isfinite(zone_values)
            if nodata_values is not None:
                valid = valid & (zone_values != nodata_values)
            zone_values = zone_values[valid]
            if zone_values.size > 0:
                results[i] = func(zone_values)
        start = end
    return results
@delayed
def _single_stats_func(
    zones_block: np.array,
    values_block: np.array,
    unique_zones: np.array,
    zone_ids: np.array,
    func: Callable,
    nodata_values: Union[int, float] = None,
) -> pd.DataFrame:
    """Delayed per-chunk kernel: sort one zones/values chunk into zone
    order and apply *func* to every zone's valid values.

    NOTE(review): despite the annotation, this returns the 1-D ndarray
    produced by ``_calc_stats`` (one entry per zone in *unique_zones*),
    not a DataFrame.
    """
    _, values_by_zones, zone_breaks = _sort_and_stride(zones_block, values_block, unique_zones)
    results = _calc_stats(values_by_zones, zone_breaks, unique_zones, zone_ids, func, nodata_values)
    return results
def _stats_dask_numpy(
    zones: da.Array,
    values: da.Array,
    zone_ids: List[Union[int, float]],
    stats_funcs: Dict,
    nodata_values: Union[int, float],
) -> pd.DataFrame:
    """Zonal stats for dask+numpy arrays.

    Computes basis statistics (max/min/sum/count/sum_squares) per chunk as
    delayed tasks, merges them across chunks with the ``_DASK_STATS``
    reducers, and derives mean/var/std from the merged sums, counts, and
    per-block M2 values.  Returns a lazy dask DataFrame.
    """
    # find ids for all zones
    unique_zones = _unique_finite_zones(zones)
    select_all_zones = False
    # select zones to do analysis
    if zone_ids is None:
        zone_ids = unique_zones
        select_all_zones = True
    zones_blocks = zones.to_delayed().ravel()
    values_blocks = values.to_delayed().ravel()
    stats_dict = {}
    stats_dict["zone"] = da.from_delayed( # zone column
        delayed(lambda x: x)(unique_zones),
        shape=(np.nan,), dtype=unique_zones.dtype,
    )
    # mean/std/var are derived from sums and counts (and M2 for std/var),
    # so those basis stats must be computed even if not requested directly.
    compute_sum_squares = False
    compute_sum = False
    compute_count = False
    if any(s in stats_funcs for s in ('mean', 'std', 'var')):
        compute_sum = True
        compute_count = True
    if any(s in stats_funcs for s in ('std', 'var')):
        compute_sum_squares = True
    basis_stats = [s for s in _DASK_BLOCK_STATS if s in stats_funcs]
    if compute_count and 'count' not in basis_stats:
        basis_stats.append('count')
    if compute_sum and 'sum' not in basis_stats:
        basis_stats.append('sum')
    if compute_sum_squares:
        basis_stats.append('sum_squares')
    dask_dtypes = dict(
        max=values.dtype,
        min=values.dtype,
        sum=values.dtype,
        count=np.int64,
        sum_squares=values.dtype,
    )
    # Keep per-block stacked arrays for the parallel variance merge
    stacked_blocks = {}
    for s in basis_stats:
        if s == 'sum_squares' and not compute_sum_squares:
            continue
        stats_func = _DASK_BLOCK_STATS.get(s)
        # NOTE(review): _single_stats_func is already decorated with
        # @delayed; wrapping it in delayed() again nests the graph but
        # still computes — confirm the double wrap is intended.
        stats_by_block = [
            da.from_delayed(
                delayed(_single_stats_func)(
                    z, v, unique_zones, zone_ids, stats_func, nodata_values
                ), shape=(np.nan,), dtype=dask_dtypes[s]
            )
            for z, v in zip(zones_blocks, values_blocks)
        ]
        zonal_stats = da.stack(stats_by_block, allow_unknown_chunksizes=True)
        if compute_sum_squares and s in ('count', 'sum', 'sum_squares'):
            stacked_blocks[s] = zonal_stats
        stats_func_by_block = delayed(_DASK_STATS[s])
        stats_dict[s] = da.from_delayed(
            stats_func_by_block(zonal_stats), shape=(np.nan,), dtype=np.float64
        )
    if 'mean' in stats_funcs:
        stats_dict['mean'] = _dask_mean(stats_dict['sum'], stats_dict['count'])
    if 'std' in stats_funcs or 'var' in stats_funcs:
        # Chan-Golub-LeVeque merge over the stacked per-block partials
        var_result = da.from_delayed(
            delayed(_parallel_variance)(
                stacked_blocks['count'],
                stacked_blocks['sum'],
                stacked_blocks['sum_squares'],
            ),
            shape=(np.nan,), dtype=np.float64,
        )
        if 'var' in stats_funcs:
            stats_dict['var'] = var_result
        if 'std' in stats_funcs:
            stats_dict['std'] = da.from_delayed(
                delayed(np.sqrt)(var_result),
                shape=(np.nan,), dtype=np.float64,
            )
    # generate dask dataframe
    stats_df = dd.concat([dd.from_dask_array(s) for s in stats_dict.values()], axis=1, ignore_unknown_divisions=True)
    # name columns
    stats_df.columns = stats_dict.keys()
    # select columns (only include stats that were actually computed)
    computed_stats = [s for s in stats_funcs.keys() if s in stats_dict]
    stats_df = stats_df[['zone'] + computed_stats]
    if not select_all_zones:
        # Filter to requested zones using boolean indexing (avoids
        # iterrows() which materializes every row one at a time).
        zone_set = set(zone_ids)
        stats_df = stats_df[stats_df['zone'].isin(zone_set)]
    return stats_df
def _stats_numpy(
    zones: xr.DataArray,
    values: xr.DataArray,
    zone_ids: List[Union[int, float]],
    stats_funcs: Dict,
    nodata_values: Union[int, float],
    return_type: str,
) -> Union[pd.DataFrame, np.ndarray]:
    """Zonal stats for numpy-backed rasters.

    Returns a DataFrame (one row per requested zone) when *return_type* is
    ``'pandas.DataFrame'``; otherwise a (n_stats, *values.shape) ndarray
    where every pixel carries its zone's statistic.

    NOTE(review): despite the annotations, the caller passes raw ndarrays
    (``zones.data`` / ``values.data``), not DataArrays.
    """
    # find ids for all zones
    unique_zones = _unique_finite_zones(zones)
    # selected zones to do analysis
    if zone_ids is None:
        zone_ids = unique_zones
    else:
        zone_ids = np.unique(zone_ids)
        # remove zones that do not exist in `zones` raster
        zone_ids = [z for z in zone_ids if z in unique_zones]
    sorted_indices, values_by_zones, zone_breaks = _sort_and_stride(zones, values, unique_zones)
    if return_type == 'pandas.DataFrame':
        stats_dict = {}
        stats_dict["zone"] = zone_ids
        # rows of the result that correspond to the requested zones
        selected_indexes = [i for i, z in enumerate(unique_zones) if z in zone_ids]
        for stats in stats_funcs:
            func = stats_funcs.get(stats)
            stats_dict[stats] = _calc_stats(
                values_by_zones, zone_breaks,
                unique_zones, zone_ids, func, nodata_values
            )
            stats_dict[stats] = stats_dict[stats][selected_indexes]
        result = pd.DataFrame(stats_dict)
    else:
        # raster output: broadcast each zone's statistic back onto the
        # pixels of that zone via the argsort indices
        result = np.full((len(stats_funcs), values.size), np.nan)
        zone_ids_map = {z: i for i, z in enumerate(unique_zones) if z in zone_ids}
        stats_id = 0
        for stats in stats_funcs:
            func = stats_funcs.get(stats)
            stats_results = _calc_stats(
                values_by_zones, zone_breaks,
                unique_zones, zone_ids, func, nodata_values
            )
            for zone in zone_ids:
                iz = zone_ids_map[zone] # position of zone in unique_zones
                # pixels belonging to this zone, in the original raster order
                if iz == 0:
                    zs = sorted_indices[: zone_breaks[iz]]
                else:
                    zs = sorted_indices[zone_breaks[iz-1]: zone_breaks[iz]]
                result[stats_id][zs] = stats_results[iz]
            stats_id += 1
        result = result.reshape(len(stats_funcs), *values.shape)
    return result
def _stats_cupy(
    orig_zones: xr.DataArray,
    orig_values: xr.DataArray,
    zone_ids: List[Union[int, float]],
    stats_funcs: Dict,
    nodata_values: Union[int, float],
) -> pd.DataFrame:
    """Zonal stats for cupy-backed rasters.

    Sorts the flattened values by zone id on the GPU, transfers the small
    per-zone run metadata to the host, and applies every stats function to
    each zone's slice of values.
    """
    # TODO add support for 3D input
    if len(orig_values.shape) > 2:
        raise TypeError('3D inputs not supported for cupy backend')
    zones = cupy.ravel(orig_zones)
    values = cupy.ravel(orig_values)
    sorted_indices = cupy.argsort(zones)
    sorted_zones = zones[sorted_indices]
    values_by_zone = values[sorted_indices]
    # Filter out non-finite values and cells equal to nodata_values.
    # Check `is not None` explicitly so a nodata value of 0 is honored
    # (a bare truthiness test silently ignored nodata_values == 0).
    if nodata_values is not None:
        filter_values = cupy.isfinite(values_by_zone) & (
            values_by_zone != nodata_values)
    else:
        filter_values = cupy.isfinite(values_by_zone)
    values_by_zone = values_by_zone[filter_values]
    sorted_zones = sorted_zones[filter_values]
    # Unique zones plus the start index and length of each zone's run
    unique_zones, unique_index, unique_counts = cupy.unique(
        sorted_zones, return_index=True, return_counts=True)
    # Transfer the small per-zone metadata to the host
    unique_index = unique_index.get()
    unique_counts = unique_counts.get()
    unique_zones = unique_zones.get()
    if zone_ids is not None:
        # Keep run start/length only for the requested zone ids.  Zones
        # requested but absent from the raster are dropped from the output
        # as well, keeping all three lists aligned (previously the absent
        # ids stayed in `unique_zones`, misaligning the indexing below).
        all_zones = list(unique_zones)
        unique_index_lst = []
        unique_counts_lst = []
        present_zones = []
        for z in zone_ids:
            try:
                idx = all_zones.index(z)
            except ValueError:
                continue
            present_zones.append(z)
            unique_index_lst.append(unique_index[idx])
            unique_counts_lst.append(unique_counts[idx])
        unique_zones = present_zones
        unique_counts = unique_counts_lst
        unique_index = unique_index_lst
    # stats columns: one row per zone, one column per stats function
    stats_dict = {'zone': []}
    for stats in stats_funcs:
        stats_dict[stats] = []
    for i in range(len(unique_zones)):
        zone_id = unique_zones[i]
        # skip non-finite zone ids
        if not np.isfinite(zone_id):
            continue
        stats_dict['zone'].append(zone_id)
        # extract this zone's contiguous run of values
        zone_values = values_by_zone[unique_index[i]:unique_index[i]+unique_counts[i]]
        # apply stats on the zone data
        for j, stats in enumerate(stats_funcs):
            stats_func = stats_funcs.get(stats)
            if not callable(stats_func):
                raise ValueError(stats)
            result = stats_func(zone_values)
            assert (len(result.shape) == 0)
            stats_dict[stats].append(cupy.float_(result))
    stats_df = pd.DataFrame(stats_dict)
    # NOTE: `zone` deliberately stays a regular column (not the index) for
    # consistency with the numpy and dask backends.  The previous
    # `stats_df.set_index("zone")` discarded its return value (no-op).
    return stats_df
def _stats_dask_cupy(
    zones,
    values,
    zone_ids,
    stats_funcs,
    nodata_values,
):
    """Zonal stats for dask arrays with cupy chunks.

    Each chunk is moved from device to host memory first, then the work is
    delegated to the dask+numpy implementation.
    """
    def _to_host(block):
        # cupy ndarray -> numpy ndarray
        return block.get()

    zones_on_host = zones.map_blocks(
        _to_host, dtype=zones.dtype, meta=np.array(()),
    )
    values_on_host = values.map_blocks(
        _to_host, dtype=values.dtype, meta=np.array(()),
    )
    return _stats_dask_numpy(
        zones_on_host, values_on_host, zone_ids, stats_funcs, nodata_values,
    )
def stats(
    zones,
    values: xr.DataArray,
    zone_ids: Optional[List[Union[int, float]]] = None,
    stats_funcs: Union[Dict, List] = [
        "mean",
        "max",
        "min",
        "sum",
        "std",
        "var",
        "count",
        "majority",
    ],
    nodata_values: Union[int, float] = None,
    return_type: str = 'pandas.DataFrame',
    column: Optional[str] = None,
    rasterize_kw: Optional[dict] = None,
) -> Union[pd.DataFrame, dd.DataFrame, xr.DataArray]:
    """
    Calculate summary statistics for each zone defined by a `zones`
    dataset, based on `values` aggregate.

    A single output value is computed for every zone in the input `zones`
    dataset.

    This function currently supports numpy backed, and dask with numpy
    backed xarray DataArrays.

    Parameters
    ----------
    zones : xr.DataArray, GeoDataFrame, or list of (geometry, value) pairs
        Zone definitions. Can be:
        - A 2D xarray DataArray of numeric zone IDs.
        - A ``geopandas.GeoDataFrame`` (requires *column*).
        - A list of ``(shapely geometry, zone_id)`` pairs.
        When vector input is provided, ``rasterize()`` is called internally
        using *values* as the template grid. Results depend on raster
        resolution.
    values : xr.DataArray or xr.Dataset
        values is a 2D xarray DataArray of numeric values (integers or
        floats). The input `values` raster contains the input values used
        in calculating the output statistic for each zone. In dask case,
        the chunksizes of `zones` and `values` should be matching. If not,
        `values` will be rechunked to be the same as of `zones`.
        When a Dataset is passed, stats are computed for each variable
        and columns are prefixed with the variable name
        (e.g. ``elevation_mean``).
        For 3D time-series DataArrays, convert to a Dataset first using
        ``.to_dataset(dim='time')`` and pass the resulting Dataset.
    zone_ids : list of ints, or floats
        List of zones to be included in calculation. If no zone_ids
        provided, all zones will be used.
    stats_funcs : dict, or list of strings, default=['mean', 'max', 'min',
        'sum', 'std', 'var', 'count', 'majority']
        The statistics to calculate for each zone. If a list, possible
        choices are subsets of the default options.
        In the dictionary case, all of its values must be callable.
        Each function takes only one argument that is the `values` raster,
        and its key becomes the column name in the output DataFrame.
        Note that if `zones` and `values` are dask backed DataArrays,
        `stats_funcs` must be provided as a list that is a subset of
        default supported stats.
    nodata_values: int, float, default=None
        Nodata value in `values` raster.
        Cells with `nodata_values` do not belong to any zone,
        and thus excluded from calculation.
    return_type: str, default='pandas.DataFrame'
        Format of returned data. If `zones` and `values` are numpy backed
        xarray DataArrays, allowed values are 'pandas.DataFrame' and
        'xarray.DataArray'. Otherwise, only 'pandas.DataFrame' is
        supported.
    column : str, optional
        Column name in the GeoDataFrame that contains zone IDs.
        Required when *zones* is a GeoDataFrame; must not be set
        for list-of-pairs or DataArray input.
    rasterize_kw : dict, optional
        Extra keyword arguments forwarded to ``rasterize()`` when
        *zones* is vector input (e.g. ``{'all_touched': True}``).

    Returns
    -------
    stats_df : Union[pandas.DataFrame, dask.dataframe.DataFrame]
        A pandas DataFrame, or a dask DataFrame where each column
        is a statistic and each row is a zone with zone id.
        When ``values`` is a Dataset, the returned DataFrame has
        columns prefixed by the variable name (e.g. ``elevation_mean``,
        ``elevation_max``), and ``return_type`` must be
        ``'pandas.DataFrame'``.

    Raises
    ------
    TypeError
        If `stats_funcs` is neither a list of stat names nor a dict of
        callables.
    ValueError
        If a stat name is not supported, or `return_type` is invalid for
        the given inputs.

    Examples
    --------
    stats() works with NumPy backed DataArray

    .. sourcecode:: python

        >>> import numpy as np
        >>> import xarray as xr
        >>> from xrspatial.zonal import stats
        >>> height, width = 10, 10
        >>> values_data = np.array([
            [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
            [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
            [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
            [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
            [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
            [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
            [70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
            [80, 81, 82, 83, 84, 85, 86, 87, 88, 89],
            [90, 91, 92, 93, 94, 95, 96, 97, 98, 99]])
        >>> values = xr.DataArray(values_data)
        >>> zones_data = np.array([
            [ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
            [ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
            [ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
            [ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
            [ 0., 0., 0., 0., 0., 10., 10., 10., 10., 10.],
            [20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
            [20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
            [20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
            [20., 20., 20., 20., 20., 30., 30., 30., 30., 30.],
            [20., 20., 20., 20., 20., 30., 30., 30., 30., 30.]])
        >>> zones = xr.DataArray(zones_data)
        >>> # Calculate Stats
        >>> stats_df = stats(zones=zones, values=values)
        >>> print(stats_df)
            zone  mean  max  min   sum       std    var  count
        0      0  22.0   44    0   550  14.21267  202.0     25
        1     10  27.0   49    5   675  14.21267  202.0     25
        2     20  72.0   94   50  1800  14.21267  202.0     25
        3     30  77.0   99   55  1925  14.21267  202.0     25
        >>> # Custom Stats
        >>> custom_stats = {'double_sum': lambda val: val.sum()*2}
        >>> custom_stats_df = stats(zones=zones,
                                    values=values,
                                    stats_funcs=custom_stats)
        >>> print(custom_stats_df)
            zone  double_sum
        0      0        1100
        1     10        1350
        2     20        3600
        3     30        3850

    stats() works with Dask with NumPy backed DataArray

    .. sourcecode:: python

        >>> import dask.array as da
        >>> values_dask = xr.DataArray(da.from_array(values_data, chunks=(3, 3)))
        >>> zones_dask = xr.DataArray(da.from_array(zones_data, chunks=(3, 3)))
        >>> # Calculate Stats with dask backed xarray DataArrays
        >>> dask_stats_df = stats(zones=zones_dask, values=values_dask)
        >>> print(type(dask_stats_df))
        <class 'dask.dataframe.core.DataFrame'>
        >>> print(dask_stats_df.compute())
            zone  mean  max  min   sum       std    var  count
        0      0  22.0   44    0   550  14.21267  202.0     25
        1     10  27.0   49    5   675  14.21267  202.0     25
        2     20  72.0   94   50  1800  14.21267  202.0     25
        3     30  77.0   99   55  1925  14.21267  202.0     25

    stats() works with 3D time-series DataArrays via Dataset conversion

    .. sourcecode:: python

        >>> # Convert a 3D time-series DataArray to a Dataset,
        >>> # then pass to stats() to get per-timestep statistics.
        >>> values_3d = xr.DataArray(
        ...     np.random.rand(2, 10, 10),
        ...     dims=['time', 'dim_0', 'dim_1'],
        ...     coords={'time': [2020, 2021]})
        >>> ds = values_3d.to_dataset(dim='time')
        >>> stats_df = stats(zones=zones, values=ds)
        >>> # Columns: zone, 2020_mean, 2020_max, ..., 2021_mean, 2021_max, ...
    """
    zones = _maybe_rasterize_zones(zones, values, column=column,
                                   rasterize_kw=rasterize_kw)

    # Dataset support: run stats per variable and merge into one DataFrame
    if isinstance(values, xr.Dataset):
        if return_type != 'pandas.DataFrame':
            raise ValueError(
                "return_type must be 'pandas.DataFrame' when values is a Dataset"
            )
        dfs = []
        for var_name in values.data_vars:
            df = stats(
                zones, values[var_name], zone_ids, stats_funcs,
                nodata_values, 'pandas.DataFrame',
            )
            # prefix every non-zone column with the variable name
            df = df.rename(
                columns={c: f'{var_name}_{c}' for c in df.columns if c != 'zone'}
            )
            dfs.append(df)
        result = dfs[0]
        for df in dfs[1:]:
            result = result.merge(df, on='zone', how='outer')
        return result

    _validate_raster(zones, func_name='stats', name='zones', ndim=2)
    _validate_raster(values, func_name='stats', name='values', ndim=(2, 3))
    validate_arrays(zones, values)

    # validate stats_funcs
    if has_dask_array() and isinstance(values.data, da.Array) and not isinstance(stats_funcs, list):
        raise ValueError(
            "Got dask-backed DataArray as `values` aggregate. "
            "`stats_funcs` must be a subset of default supported stats "
            "`[\'mean\', \'max\', \'min\', \'sum\', \'std\', \'var\', \'count\']`"
        )

    if isinstance(stats_funcs, list):
        # map stat names onto the default implementations
        stats_funcs_dict = {}
        for stat_name in stats_funcs:
            func = _DEFAULT_STATS.get(stat_name, None)
            if func is None:
                err_str = f"Invalid stat name. {stat_name} option not supported."
                raise ValueError(err_str)
            stats_funcs_dict[stat_name] = func
    elif isinstance(stats_funcs, dict):
        stats_funcs_dict = stats_funcs.copy()
    else:
        # Previously any other type fell through silently and triggered a
        # NameError on `stats_funcs_dict` below; fail fast instead.
        raise TypeError(
            'stats_funcs must be a list of stat names or a dict of '
            f'callables, got {type(stats_funcs)}'
        )

    # dispatch on the backing array type of `values`
    mapper = ArrayTypeFunctionMapping(
        numpy_func=lambda *args: _stats_numpy(*args, return_type=return_type),
        dask_func=_stats_dask_numpy,
        cupy_func=_stats_cupy,
        dask_cupy_func=_stats_dask_cupy,
    )
    result = mapper(values)(
        zones.data, values.data, zone_ids, stats_funcs_dict, nodata_values,
    )

    if return_type == 'xarray.DataArray':
        # stack per-stat rasters into one DataArray with a leading
        # 'stats' dimension
        return xr.DataArray(
            result,
            coords={'stats': list(stats_funcs_dict.keys()), **values.coords},
            dims=('stats', *values.dims),
            attrs=values.attrs
        )
    return result
def _find_cats(values, cat_ids, nodata_values):
    """Resolve the categories present in *values* and the subset to use.

    2D rasters derive categories from the unique finite, non-nodata cell
    values; 3D rasters take them from the coordinate of the leading
    dimension.  Returns ``(unique_cats, cat_ids)``.
    """
    if len(values.shape) == 2:
        # 2D case: categories are the distinct valid cell values
        unique_cats = _unique_finite_cats(values.data, nodata_values)
    else:
        # 3D case: categories come from the first dimension's coordinate
        unique_cats = values[values.dims[0]].data
    if cat_ids is None:
        cat_ids = unique_cats
    else:
        if isinstance(values.data, np.ndarray) or is_cupy_array(values.data):
            # remove cats that do not exist in `values` raster
            cat_ids = [c for c in cat_ids if c in unique_cats]
        else:
            # dask-backed values: filtering is handled by _select_ids
            # (defined elsewhere in this module)
            cat_ids = _select_ids(unique_cats, cat_ids)
    return unique_cats, cat_ids
def _get_zone_values(values_by_zones, start, end):
    """Slice one zone's run out of the zone-sorted values.

    A 1-D input came from flattening a 2D raster; a 2-D input came from a
    3D raster and keeps its leading (layer) axis intact.
    """
    if values_by_zones.ndim == 1:
        return values_by_zones[start:end]
    return values_by_zones[:, start:end]
def _single_zone_crosstab_2d(
    zone_values,
    unique_cats,
    cat_ids,
    nodata_values,
    crosstab_dict,
):
    """Count, per category, the valid cells of one zone (2D input).

    Appends one total count to ``crosstab_dict[TOTAL_COUNT]`` and one
    per-category count to each ``crosstab_dict[cat]`` for the requested
    categories.  Mutates *crosstab_dict* in place; the caller relies on
    one append per zone to keep columns aligned.
    """
    # 1D flatten zone_values, i.e, original data is 2D
    # filter out non-finite and nodata_values
    mask = np.isfinite(zone_values)
    if nodata_values is not None:
        mask = mask & (zone_values != nodata_values)
    zone_values = zone_values[mask]
    total_count = zone_values.shape[0]
    crosstab_dict[TOTAL_COUNT].append(total_count)
    # Sort so each category forms a contiguous run, then reuse the
    # `_strides` run-end machinery to get per-category offsets.
    sorted_zone_values = np.sort(zone_values)
    zone_cat_breaks = _strides(sorted_zone_values, unique_cats)
    cat_start = 0
    for j, cat in enumerate(unique_cats):
        if cat in cat_ids:
            # run length = this category's end offset minus previous end
            count = zone_cat_breaks[j] - cat_start
            crosstab_dict[cat].append(count)
        cat_start = zone_cat_breaks[j]
def _single_zone_crosstab_3d(
    zone_values,
    unique_cats,
    cat_ids,
    nodata_values,
    crosstab_dict,
    stats_func
):
    """Aggregate one zone of a 3D raster.

    *zone_values* is the 2-D (category, pixel) slab for a single zone; for
    every requested category, *stats_func* is applied to that layer's
    valid values and the result appended to ``crosstab_dict[cat]``.
    Mutates *crosstab_dict* in place.
    """
    for layer, cat in enumerate(unique_cats):
        if cat not in cat_ids:
            continue
        layer_data = zone_values[layer]
        # drop non-finite values and nodata cells
        valid = np.isfinite(layer_data)
        if nodata_values is not None:
            valid = valid & (layer_data != nodata_values)
        crosstab_dict[cat].append(stats_func(layer_data[valid]))
def _crosstab_numpy(
zones: np.ndarray,
values: np.ndarray,
zone_ids: List[Union[int, float]],
unique_cats: np.ndarray,
cat_ids: Union[List, np.ndarray],
nodata_values: Union[int, float],
agg: str,
) -> pd.DataFrame:
# find ids for all zones
unique_zones = _unique_finite_zones(zones)
# selected zones to do analysis
if zone_ids is None:
zone_ids = unique_zones
else:
# remove zones that do not exist in `zones` raster
zone_ids = [z for z in zone_ids if z in unique_zones]
crosstab_dict = {}
crosstab_dict["zone"] = zone_ids
if len(values.shape) == 2:
crosstab_dict[TOTAL_COUNT] = []
for cat in cat_ids:
crosstab_dict[cat] = []
_, values_by_zones, zone_breaks = _sort_and_stride(
zones, values, unique_zones
)
start = 0
for i in range(len(unique_zones)):
end = zone_breaks[i]
if unique_zones[i] in zone_ids:
# get data for zone unique_zones[i]
zone_values = _get_zone_values(values_by_zones, start, end)
if len(values.shape) == 2:
_single_zone_crosstab_2d(