@@ -2,6 +2,7 @@
 
 import copy
 import itertools
+import math
 import operator
 from collections import namedtuple
 from functools import partial, reduce
@@ -85,7 +86,7 @@ def _move_reduce_dims_to_end(arr: np.ndarray, axis: Sequence) -> np.ndarray:
 
 def _collapse_axis(arr: np.ndarray, naxis: int) -> np.ndarray:
     """Reshape so that the last `naxis` axes are collapsed to one axis."""
-    newshape = arr.shape[:-naxis] + (np.prod(arr.shape[-naxis:]),)
+    newshape = arr.shape[:-naxis] + (math.prod(arr.shape[-naxis:]),)
     return arr.reshape(newshape)
 
 
@@ -165,7 +166,7 @@ def find_group_cohorts(labels, chunks, merge=True, method="cohorts"):
 
     # Iterate over each block and create a new block of same shape with "chunk number"
     shape = tuple(array.blocks.shape[ax] for ax in axis)
-    blocks = np.empty(np.prod(shape), dtype=object)
+    blocks = np.empty(math.prod(shape), dtype=object)
     for idx, block in enumerate(array.blocks.ravel()):
         blocks[idx] = np.full(tuple(block.shape[ax] for ax in axis), idx)
     which_chunk = np.block(blocks.reshape(shape).tolist()).reshape(-1)
@@ -382,11 +383,11 @@ def offset_labels(labels: np.ndarray, ngroups: int) -> tuple[np.ndarray, int]:
     """
     assert labels.ndim > 1
     offset: np.ndarray = (
-        labels + np.arange(np.prod(labels.shape[:-1])).reshape((*labels.shape[:-1], -1)) * ngroups
+        labels + np.arange(math.prod(labels.shape[:-1])).reshape((*labels.shape[:-1], -1)) * ngroups
     )
     # -1 indicates NaNs. preserve these otherwise we aggregate in the wrong groups!
     offset[labels == -1] = -1
-    size: int = np.prod(labels.shape[:-1]) * ngroups  # type: ignore
+    size: int = math.prod(labels.shape[:-1]) * ngroups  # type: ignore
     return offset, size
 
 
@@ -455,7 +456,7 @@ def factorize_(
         factorized.append(idx)
 
     grp_shape = tuple(len(grp) for grp in found_groups)
-    ngroups = np.prod(grp_shape)
+    ngroups = math.prod(grp_shape)
     if len(by) > 1:
         group_idx = np.ravel_multi_index(factorized, grp_shape, mode="wrap")
         # NaNs; as well as values outside the bins are coded by -1
@@ -630,7 +631,7 @@ def chunk_reduce(
         groups = groups[0]
 
     # always reshape to 1D along group dimensions
-    newshape = array.shape[: array.ndim - by.ndim] + (np.prod(array.shape[-by.ndim :]),)
+    newshape = array.shape[: array.ndim - by.ndim] + (math.prod(array.shape[-by.ndim :]),)
     array = array.reshape(newshape)
 
     assert group_idx.ndim == 1
@@ -1506,7 +1507,7 @@ def groupby_reduce(
         by, final_groups, grp_shape = _factorize_multiple(
             by, expected_groups, by_is_dask=by_is_dask, reindex=reindex
         )
-        expected_groups = (pd.RangeIndex(np.prod(grp_shape)),)
+        expected_groups = (pd.RangeIndex(math.prod(grp_shape)),)
 
     assert len(by) == 1
     by = by[0]
@@ -1601,7 +1602,7 @@ def groupby_reduce(
             array_subset = array
             for ax, idxr in zip(range(-by.ndim, 0), indexer):
                 array_subset = np.take(array_subset, idxr, axis=ax)
-            numblocks = np.prod([len(array_subset.chunks[ax]) for ax in axis])
+            numblocks = math.prod([len(array_subset.chunks[ax]) for ax in axis])
 
             # get final result for these groups
             r, *g = partial_agg(
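For context on the motivation: `np.prod` on a plain shape tuple returns a NumPy scalar whose width follows the platform's default integer (so large products can silently wrap around), and it returns the float `1.0` for empty input, whereas `math.prod` (Python 3.8+) always returns an exact, arbitrary-precision Python `int`. A minimal illustration, separate from the patch itself:

import math
import numpy as np

shape = (2**21, 2**21, 2**21)  # product is 2**63, one past the int64 maximum

# np.prod converts the tuple to a fixed-width integer array and multiplies,
# so the result wraps around (the exact value is platform-dependent).
print(np.prod(shape))

# math.prod multiplies Python ints, so the result is exact.
print(math.prod(shape))  # 9223372036854775808

# Empty input: np.prod returns the float 1.0, math.prod returns the int 1,
# which matters when the result is spliced into a reshape tuple.
print(np.prod(()), math.prod(()))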
|
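And a small sanity check, using `_collapse_axis` copied verbatim from the hunk above, that the `math.prod` version still behaves as the docstring says:

import math
import numpy as np

def _collapse_axis(arr: np.ndarray, naxis: int) -> np.ndarray:
    """Reshape so that the last `naxis` axes are collapsed to one axis."""
    newshape = arr.shape[:-naxis] + (math.prod(arr.shape[-naxis:]),)
    return arr.reshape(newshape)

arr = np.arange(24).reshape(2, 3, 4)
print(_collapse_axis(arr, 2).shape)  # (2, 12): the trailing (3, 4) axes collapse into one axis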