Commit 2b36965

backend → engine
1 parent 68dc5d7 · commit 2b36965

File tree

4 files changed: +45 -45 lines changed


dask_groupby/core.py

Lines changed: 22 additions & 22 deletions
@@ -32,14 +32,14 @@
 FinalResultsDict = Dict[str, Union["DaskArray", np.ndarray]]


-def _get_aggregate(backend):
-    if backend == "numba":
+def _get_aggregate(engine):
+    if engine == "numba":
         return npg.aggregate_numba.aggregate
-    elif backend == "numpy":
+    elif engine == "numpy":
         return npg.aggregate_numpy.aggregate
     else:
         raise ValueError(
-            "Expected backend to be one of ['numpy', 'numba']. Received {backend} instead."
+            "Expected engine to be one of ['numpy', 'numba']. Received {engine} instead."
         )

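For orientation, a minimal sketch (not part of this commit) of what the renamed helper dispatches to. It assumes numpy_groupies is importable as npg, matching the import used in core.py:

    import numpy as np
    import numpy_groupies as npg

    # _get_aggregate("numpy") returns this function;
    # _get_aggregate("numba") returns npg.aggregate_numba.aggregate instead.
    aggregate = npg.aggregate_numpy.aggregate

    group_idx = np.array([0, 0, 1, 1, 2])
    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    aggregate(group_idx, values, func="sum")  # -> array([3., 7., 5.])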

@@ -373,7 +373,7 @@ def chunk_argreduce(
     dtype=None,
     reindex: bool = False,
     isbin: bool = False,
-    backend: str = "numpy",
+    engine: str = "numpy",
 ) -> IntermediateDict:
     """
     Per-chunk arg reduction.
@@ -392,7 +392,7 @@ def chunk_argreduce(
         fill_value=fill_value,
         isbin=isbin,
         dtype=dtype,
-        backend=backend,
+        engine=engine,
     )
     if not np.isnan(results["groups"]).all():
         # will not work for empty groups...
@@ -422,7 +422,7 @@ def chunk_reduce(
     dtype=None,
     reindex: bool = False,
     isbin: bool = False,
-    backend: str = "numpy",
+    engine: str = "numpy",
     kwargs=None,
 ) -> IntermediateDict:
     """
@@ -533,7 +533,7 @@ def chunk_reduce(
                 **kw,
             )
         else:
-            result = _get_aggregate(backend)(
+            result = _get_aggregate(engine)(
                 group_idx,
                 array,
                 axis=-1,
@@ -648,11 +648,11 @@ def _npg_aggregate(
     group_ndim: int,
     fill_value: Any = None,
     min_count: Optional[int] = None,
-    backend: str = "numpy",
+    engine: str = "numpy",
     finalize_kwargs: Optional[Mapping] = None,
 ) -> FinalResultsDict:
     """Final aggregation step of tree reduction"""
-    results = _npg_combine(x_chunk, agg, axis, keepdims, group_ndim, backend)
+    results = _npg_combine(x_chunk, agg, axis, keepdims, group_ndim, engine)
     return _finalize_results(
         results, agg, axis, expected_groups, fill_value, min_count, finalize_kwargs
     )
@@ -664,7 +664,7 @@ def _npg_combine(
     axis: Sequence,
     keepdims: bool,
     group_ndim: int,
-    backend: str,
+    engine: str,
 ) -> IntermediateDict:
     """Combine intermediates step of tree reduction."""
     from dask.array.core import _concatenate2
@@ -722,7 +722,7 @@ def _conc2(key1, key2=None, axis=None) -> np.ndarray:
             expected_groups=None,
             fill_value=agg.fill_value["intermediate"][slicer],
             dtype=agg.dtype,
-            backend=backend,
+            engine=engine,
         )

     if agg.chunk[-1] == "nanlen":
@@ -737,7 +737,7 @@ def _conc2(key1, key2=None, axis=None) -> np.ndarray:
                 expected_groups=None,
                 fill_value=(0,),
                 dtype=np.intp,
-                backend=backend,
+                engine=engine,
             )["intermediates"][0]
         )

@@ -762,7 +762,7 @@ def _conc2(key1, key2=None, axis=None) -> np.ndarray:
             axis=axis,
             expected_groups=None,
             fill_value=fv,
-            backend=backend,
+            engine=engine,
         )
         results["intermediates"].append(*_results["intermediates"])
         results["groups"] = _results["groups"]
@@ -812,7 +812,7 @@ def groupby_agg(
     method: str = "mapreduce",
     min_count: Optional[int] = None,
     isbin: bool = False,
-    backend: str = "numpy",
+    engine: str = "numpy",
     finalize_kwargs: Optional[Mapping] = None,
 ) -> Tuple["DaskArray", Union[np.ndarray, "DaskArray"]]:

@@ -851,7 +851,7 @@ def groupby_agg(
             fill_value=agg.fill_value["intermediate"],
             isbin=isbin,
             reindex=split_out > 1,
-            backend=backend,
+            engine=engine,
         ),
         inds,
         array,
@@ -887,7 +887,7 @@ def groupby_agg(
         group_ndim=by.ndim,
         fill_value=fill_value,
         min_count=min_count,
-        backend=backend,
+        engine=engine,
         finalize_kwargs=finalize_kwargs,
     )

@@ -904,7 +904,7 @@ def groupby_agg(
             expected_groups=expected_agg,
             **agg_kwargs,
         ),
-        combine=partial(_npg_combine, agg=agg, group_ndim=by.ndim, backend=backend),
+        combine=partial(_npg_combine, agg=agg, group_ndim=by.ndim, engine=engine),
         name=f"{name}-reduce",
         dtype=array.dtype,
         axis=axis,
@@ -1015,7 +1015,7 @@ def groupby_reduce(
     min_count: Optional[int] = None,
     split_out: int = 1,
     method: str = "mapreduce",
-    backend: str = "numpy",
+    engine: str = "numpy",
     finalize_kwargs: Optional[Mapping] = None,
 ) -> Tuple["DaskArray", Union[np.ndarray, "DaskArray"]]:
     """
@@ -1069,8 +1069,8 @@ def groupby_reduce(
         where the group labels repeat at regular intervals like 'hour',
         'month', dayofyear' etc. Optimize chunking ``array`` for this
         method by first rechunking using ``rechunk_for_cohorts``.
-    backend : {"numpy", "numba"}, optional, default: ``"numpy"``
-        Backend for ``numpy_groupies``.
+    engine : {"numpy", "numba"}, optional, default: ``"numpy"``
+        Engine for ``numpy_groupies``.
     finalize_kwargs : dict, optional
         Kwargs passed to finalize the reduction such as ``ddof`` for var, std.
@@ -1252,7 +1252,7 @@ def groupby_reduce(
         fill_value=fill_value,
         min_count=min_count,
         isbin=isbin,
-        backend=backend,
+        engine=engine,
         finalize_kwargs=finalize_kwargs,
     )
     if method == "cohorts":
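
For callers, the rename is a pure keyword swap. A minimal usage sketch, assuming groupby_reduce is imported from dask_groupby.core (import path assumed) and with illustrative data:

    import numpy as np
    from dask_groupby.core import groupby_reduce  # import path assumed

    array = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    labels = np.array([0, 0, 1, 1, 2, 2])

    # before this commit: groupby_reduce(array, labels, func="sum", backend="numpy")
    result, groups = groupby_reduce(array, labels, func="sum", engine="numpy")
    # result -> array([ 3.,  7., 11.]); groups -> array([0, 1, 2])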

dask_groupby/xarray.py

Lines changed: 4 additions & 4 deletions
@@ -58,7 +58,7 @@ def xarray_reduce(
     split_out: int = 1,
     fill_value=None,
     method: str = "mapreduce",
-    backend: str = "numpy",
+    engine: str = "numpy",
     keep_attrs: bool = True,
     skipna: Optional[bool] = None,
     min_count: Optional[int] = None,
@@ -110,8 +110,8 @@ def xarray_reduce(
         'month', dayofyear' etc. Optimize chunking ``array`` for this
         method by first rechunking using ``rechunk_for_cohorts``.

-    backend : {"numpy", "numba"}, optional
-        Backend for numpy_groupies
+    engine : {"numpy", "numba"}, optional
+        Engine for numpy_groupies
     keep_attrs : bool, optional
         Preserve attrs?
     skipna : bool, optional
@@ -316,7 +316,7 @@ def wrapper(array, to_group, *, func, skipna, **kwargs):
         "method": method,
         "min_count": min_count,
         "skipna": skipna,
-        "backend": backend,
+        "engine": engine,
         # The following mess exists because for multiple `by`s I factorize eagerly
         # here before passing it on; this means I have to handle the
         # "binning by single by variable" case explicitly where the factorization

tests/test_core.py

Lines changed: 16 additions & 16 deletions
@@ -56,7 +56,7 @@ def test_alignment_error():
         groupby_reduce(da, labels, func="mean")


-@pytest.mark.parametrize("backend", ["numpy", "numba"])
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
 @pytest.mark.parametrize("dtype", (float, int))
 @pytest.mark.parametrize("chunk, split_out", [(False, 1), (True, 1), (True, 2), (True, 3)])
 @pytest.mark.parametrize("expected_groups", [None, [0, 1, 2], np.array([0, 1, 2])])
@@ -86,7 +86,7 @@ def test_alignment_error():
     ],
 )
 def test_groupby_reduce(
-    array, by, expected, func, expected_groups, chunk, split_out, dtype, backend
+    array, by, expected, func, expected_groups, chunk, split_out, dtype, engine
 ):
     array = array.astype(dtype)
     if chunk:
@@ -109,18 +109,18 @@ def test_groupby_reduce(
         expected_groups=expected_groups,
         fill_value=123,
         split_out=split_out,
-        backend=backend,
+        engine=engine,
     )
     assert_equal(expected, result)


-@pytest.mark.parametrize("backend", ["numpy", "numba"])
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
 @pytest.mark.parametrize("size", ((12,), (12, 5)))
 @pytest.mark.parametrize(
     "func",
     ALL_FUNCS,
 )
-def test_groupby_reduce_all(size, func, backend):
+def test_groupby_reduce_all(size, func, engine):

     by = np.ones(size[-1])
     array = np.random.randn(*size)
@@ -141,7 +141,7 @@ def test_groupby_reduce_all(size, func, backend):
     expected = getattr(np, func)(array, axis=-1, **kwargs)
     expected = np.expand_dims(expected, -1)

-    actual, _ = groupby_reduce(array, by, func=func, backend=backend, finalize_kwargs=kwargs)
+    actual, _ = groupby_reduce(array, by, func=func, engine=engine, finalize_kwargs=kwargs)
     if "arg" in func:
         assert actual.dtype.kind == "i"
     assert_equal(actual, expected)
@@ -152,7 +152,7 @@ def test_groupby_reduce_all(size, func, backend):
             by,
             func=func,
             method=method,
-            backend=backend,
+            engine=engine,
             finalize_kwargs=kwargs,
         )
         if "arg" in func:
@@ -362,11 +362,11 @@ def test_dask_reduce_axis_subset():


 @pytest.mark.parametrize("func", ALL_FUNCS)
-@pytest.mark.parametrize("backend", ["numpy", "numba"])
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
 @pytest.mark.parametrize(
     "axis", [None, (0, 1, 2), (0, 1), (0, 2), (1, 2), 0, 1, 2, (0,), (1,), (2,)]
 )
-def test_groupby_reduce_axis_subset_against_numpy(func, axis, backend):
+def test_groupby_reduce_axis_subset_against_numpy(func, axis, engine):
     if not isinstance(axis, int) and "arg" in func and (axis is None or len(axis) > 1):
         pytest.skip()
     if func in ["all", "any"]:
@@ -377,7 +377,7 @@ def test_groupby_reduce_axis_subset_against_numpy(func, axis, backend):
     by = np.broadcast_to(labels2d, (3, *labels2d.shape))
     array = np.ones_like(by)
     kwargs = dict(
-        func=func, axis=axis, expected_groups=[0, 2], fill_value=fill_value, backend=backend
+        func=func, axis=axis, expected_groups=[0, 2], fill_value=fill_value, engine=engine
     )
     with raise_if_dask_computes():
         actual, _ = groupby_reduce(
@@ -389,7 +389,7 @@ def test_groupby_reduce_axis_subset_against_numpy(func, axis, backend):
     assert_equal(actual, expected)


-@pytest.mark.parametrize("backend", ["numpy", "numba"])
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
 @pytest.mark.parametrize("chunks", [None, (2, 2, 3)])
 @pytest.mark.parametrize(
     "axis, groups, expected_shape",
@@ -399,7 +399,7 @@ def test_groupby_reduce_axis_subset_against_numpy(func, axis, backend):
         (None, [0], (1,)),  # global reduction; 0 shaped group axis; 1 group
     ],
 )
-def test_groupby_reduce_nans(chunks, axis, groups, expected_shape, backend):
+def test_groupby_reduce_nans(chunks, axis, groups, expected_shape, engine):
     def _maybe_chunk(arr):
         if chunks:
             return da.from_array(arr, chunks=chunks)
@@ -419,7 +419,7 @@ def _maybe_chunk(arr):
         expected_groups=groups,
         axis=axis,
         fill_value=0,
-        backend=backend,
+        engine=engine,
     )
     assert_equal(result, np.zeros(expected_shape, dtype=np.int64))

@@ -431,8 +431,8 @@ def _maybe_chunk(arr):
     # by = np.broadcast_to(labels2d, (3, *labels2d.shape))


-@pytest.mark.parametrize("backend", ["numpy", "numba"])
-def test_groupby_all_nan_blocks(backend):
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
+def test_groupby_all_nan_blocks(engine):
     labels = np.array([0, 0, 2, 2, 2, 1, 1, 2, 2, 1, 1, 0])
     nan_labels = labels.astype(float)  # copy
     nan_labels[:5] = np.nan
@@ -448,7 +448,7 @@ def test_groupby_all_nan_blocks(backend):
         da.from_array(by, chunks=(1, 3)),
         func="sum",
         expected_groups=None,
-        backend=backend,
+        engine=engine,
     )
     assert_equal(actual, expected)

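Each @pytest.mark.parametrize("engine", ["numpy", "numba"]) decorator above doubles the test matrix, so every case now runs once per engine. A standalone toy illustration of the pattern (hypothetical test, not from this suite):

    import pytest

    @pytest.mark.parametrize("engine", ["numpy", "numba"])
    def test_engine_values(engine):
        # collected as test_engine_values[numpy] and test_engine_values[numba]
        assert engine in ("numpy", "numba")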

tests/test_xarray.py

Lines changed: 3 additions & 3 deletions
@@ -22,11 +22,11 @@
     pass


-@pytest.mark.parametrize("backend", ["numpy", "numba"])
+@pytest.mark.parametrize("engine", ["numpy", "numba"])
 @pytest.mark.parametrize("min_count", [None, 1, 3])
 @pytest.mark.parametrize("add_nan", [True, False])
 @pytest.mark.parametrize("skipna", [True, False])
-def test_xarray_reduce(skipna, add_nan, min_count, backend):
+def test_xarray_reduce(skipna, add_nan, min_count, engine):
     arr = np.ones((4, 12))

     if add_nan:
@@ -46,7 +46,7 @@ def test_xarray_reduce(skipna, add_nan, min_count, backend):

     expected = da.groupby("labels").sum(skipna=skipna, min_count=min_count)
     actual = xarray_reduce(
-        da, "labels", func="sum", skipna=skipna, min_count=min_count, backend=backend
+        da, "labels", func="sum", skipna=skipna, min_count=min_count, engine=engine
     )
     assert_equal(expected, actual)

