Dataset columns:
- title: string (1–185 chars)
- diff: string (0–32.2M chars)
- body: string (0–123k chars)
- url: string (57–58 chars)
- created_at: string (20 chars)
- closed_at: string (20 chars)
- merged_at: string (20 chars)
- updated_at: string (20 chars)
DEPR: inplace keyword in rename
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bcbe2c6d8b104..b22d53f37cdc7 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -835,6 +835,7 @@ Other Deprecations - Clarified warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument (:issue:`46210`) - Emit warning from :func:`to_datetime` when delimited dates can't be parsed in accordance to specified ``dayfirst`` argument even for dates where leading zero is omitted (e.g. ``31/1/2001``) (:issue:`47880`) - Deprecated :class:`Series` and :class:`Resampler` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) raising a ``NotImplementedError`` when the dtype is non-numric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) +- Deprecated the ``inplace`` keyword in :meth:`DataFrame.rename` and :meth:`Series.rename`, use ``obj = obj.rename(..., copy=False)`` instead (:issue:`48131`) - Deprecated :meth:`Series.rank` returning an empty result when the dtype is non-numeric and ``numeric_only=True`` is provided; this will raise a ``TypeError`` in a future version (:issue:`47500`) - Deprecated argument ``errors`` for :meth:`Series.mask`, :meth:`Series.where`, :meth:`DataFrame.mask`, and :meth:`DataFrame.where` as ``errors`` had no effect on this methods (:issue:`47728`) - Deprecated arguments ``*args`` and ``**kwargs`` in :class:`Rolling`, :class:`Expanding`, and :class:`ExponentialMovingWindow` ops. (:issue:`47836`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 04e168f1ab6ca..2391258fd4539 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5369,7 +5369,7 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[True], level: Level = ..., errors: IgnoreRaise = ..., @@ -5384,8 +5384,8 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool = ..., - inplace: Literal[False] = ..., + copy: bool | lib.NoDefault = ..., + inplace: Literal[False] | lib.NoDefault = ..., level: Level = ..., errors: IgnoreRaise = ..., ) -> DataFrame: @@ -5399,8 +5399,8 @@ def rename( index: Renamer | None = ..., columns: Renamer | None = ..., axis: Axis | None = ..., - copy: bool = ..., - inplace: bool = ..., + copy: bool | lib.NoDefault = ..., + inplace: bool | lib.NoDefault = ..., level: Level = ..., errors: IgnoreRaise = ..., ) -> DataFrame | None: @@ -5413,8 +5413,8 @@ def rename( index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, - copy: bool = True, - inplace: bool = False, + copy: bool | lib.NoDefault = lib.no_default, + inplace: bool | lib.NoDefault = lib.no_default, level: Level = None, errors: IgnoreRaise = "ignore", ) -> DataFrame | None: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 88184285d3683..141957aa4c8cb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1046,12 +1046,29 @@ def _rename( index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, - copy: bool_t = True, - inplace: bool_t = False, + copy: bool_t | lib.NoDefault = lib.no_default, + inplace: bool_t | lib.NoDefault = lib.no_default, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename + if inplace is not lib.no_default: + warnings.warn( + f"The 
'inplace' keyword in {type(self).__name__}.rename is " + "deprecated and will be removed in a future version. " + "Use `obj=obj.rename(..., copy=False)` instead.", + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) + else: + inplace = False + + if inplace: + if copy is not lib.no_default: + raise ValueError("Cannot specify copy when inplace=True") + copy = False + elif copy is lib.no_default: + copy = True if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") diff --git a/pandas/core/series.py b/pandas/core/series.py index 13aa12287072c..b3569ea6dcc39 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1915,7 +1915,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") - def _set_name(self, name, inplace=False) -> Series: + def _set_name(self, name, inplace=False, copy=True) -> Series: """ Set the Series name. @@ -1924,9 +1924,29 @@ def _set_name(self, name, inplace=False) -> Series: name : str inplace : bool Whether to modify `self` directly or return a copy. + copy : bool, default True + Whether to make a copy of the underlying data. """ + if inplace is not lib.no_default: + warnings.warn( + f"The 'inplace' keyword in {type(self).__name__}.rename is " + "deprecated and will be removed in a future version. " + "Use `obj=obj.rename(..., copy=False)` instead.", + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) + else: + inplace = False + inplace = validate_bool_kwarg(inplace, "inplace") - ser = self if inplace else self.copy() + if inplace: + if copy is not lib.no_default: + raise ValueError("Cannot pass copy when inplace=True") + copy = False + elif copy is lib.no_default: + copy = True + + ser = self if inplace else self.copy(deep=copy) ser.name = name return ser @@ -4847,7 +4867,7 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., + copy: bool | lib.NoDefault = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., @@ -4860,8 +4880,8 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., - inplace: Literal[False] = ..., + copy: bool | lib.NoDefault = ..., + inplace: Literal[False] | lib.NoDefault = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: @@ -4873,8 +4893,8 @@ def rename( index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., - copy: bool = ..., - inplace: bool = ..., + copy: bool | lib.NoDefault = ..., + inplace: bool | lib.NoDefault = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: @@ -4885,8 +4905,8 @@ def rename( index: Renamer | Hashable | None = None, *, axis: Axis | None = None, - copy: bool = True, - inplace: bool = False, + copy: bool | lib.NoDefault = lib.no_default, + inplace: bool | lib.NoDefault = lib.no_default, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: @@ -4972,7 +4992,7 @@ def rename( errors=errors, ) else: - return self._set_name(index, inplace=inplace) + return self._set_name(index, inplace=inplace, copy=copy) @overload def set_axis( diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index b1594660caec6..86ef0a7506c5b 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -189,7 +189,10 @@ def test_rename_inplace(self, 
float_frame): c_values = float_frame["C"] float_frame = float_frame.copy() - return_value = float_frame.rename(columns={"C": "foo"}, inplace=True) + + msg = "'inplace' keyword in DataFrame.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = float_frame.rename(columns={"C": "foo"}, inplace=True) assert return_value is None assert "C" not in float_frame diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index af092d433a846..17b6a3eaed8e0 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -273,7 +273,9 @@ def _check_f(base, f): # rename f = lambda x: x.rename({1: "foo"}, inplace=True) - _check_f(data.copy(), f) + msg = "inplace' keyword in DataFrame.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + _check_f(data.copy(), f) # -----Series----- d = data.copy()["c"] @@ -292,7 +294,9 @@ def _check_f(base, f): # rename f = lambda x: x.rename({1: "foo"}, inplace=True) - _check_f(d.copy(), f) + msg = "inplace' keyword in Series.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + _check_f(d.copy(), f) @async_mark() @td.check_file_leaks diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 0546534d91399..7cbbe585abd65 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -429,11 +429,14 @@ def test_inplace_raises(method, frame_only): s.flags.allows_duplicate_labels = False msg = "Cannot specify" - warn_msg = "Series.set_axis 'inplace' keyword" - if "set_axis" in str(method): + warn_msg = None + warn = None + if "rename" in str(method): + warn_msg = "'inplace' keyword in Series.rename" + warn = FutureWarning + elif "set_axis" in str(method): + warn_msg = "Series.set_axis 'inplace' keyword" warn = FutureWarning - else: - warn = None with pytest.raises(ValueError, match=msg): with tm.assert_produces_warning(warn, match=warn_msg): diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py index 729c07b8bdde7..0006b8d3893fc 100644 --- a/pandas/tests/series/methods/test_rename.py +++ b/pandas/tests/series/methods/test_rename.py @@ -56,7 +56,9 @@ def test_rename_set_name(self): def test_rename_set_name_inplace(self): ser = Series(range(3), index=list("abc")) for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - ser.rename(name, inplace=True) + msg = "'inplace' keyword in Series.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser.rename(name, inplace=True) assert ser.name == name exp = np.array(["a", "b", "c"], dtype=np.object_) @@ -75,7 +77,9 @@ def test_rename_inplace(self, datetime_series): renamer = lambda x: x.strftime("%Y%m%d") expected = renamer(datetime_series.index[0]) - datetime_series.rename(renamer, inplace=True) + msg = "'inplace' keyword in Series.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + datetime_series.rename(renamer, inplace=True) assert datetime_series.index[0] == expected def test_rename_with_custom_indexer(self): @@ -94,7 +98,9 @@ class MyIndexer: ix = MyIndexer() ser = Series([1, 2, 3]) - ser.rename(ix, inplace=True) + msg = "'inplace' keyword in Series.rename" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser.rename(ix, inplace=True) assert ser.name is ix def test_rename_callable(self): diff --git a/pandas/tests/series/methods/test_set_name.py b/pandas/tests/series/methods/test_set_name.py index 
cbc8ebde7a8ab..612b5afab85e6 100644 --- a/pandas/tests/series/methods/test_set_name.py +++ b/pandas/tests/series/methods/test_set_name.py @@ -6,7 +6,7 @@ class TestSetName: def test_set_name(self): ser = Series([1, 2, 3]) - ser2 = ser._set_name("foo") + ser2 = ser._set_name("foo", copy=True) assert ser2.name == "foo" assert ser.name is None assert ser is not ser2 diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py index 3c867f7582b7d..a517886b468e7 100644 --- a/pandas/tests/series/test_validate.py +++ b/pandas/tests/series/test_validate.py @@ -1,5 +1,7 @@ import pytest +import pandas._testing as tm + @pytest.mark.parametrize( "func", @@ -19,8 +21,14 @@ def test_validate_bool_args(string_series, func, inplace): msg = 'For argument "inplace" expected type bool' kwargs = {"inplace": inplace} + warn_msg = "'inplace' keyword in Series.rename" + warn = None if func == "_set_name": kwargs["name"] = "hello" + warn = FutureWarning + elif func == "rename": + warn = FutureWarning with pytest.raises(ValueError, match=msg): - getattr(string_series, func)(**kwargs) + with tm.assert_produces_warning(warn, match=warn_msg): + getattr(string_series, func)(**kwargs)
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
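A minimal sketch of the migration the new `FutureWarning` points to, per the warning text in the diff ("Use `obj=obj.rename(..., copy=False)` instead"); the frame and labels here are illustrative:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})

# Deprecated under this PR: mutating in place via the 'inplace' keyword
# df.rename(columns={"A": "a"}, inplace=True)  # emits FutureWarning

# Replacement suggested by the warning message: rebind the result;
# copy=False renames the labels without copying the underlying data
df = df.rename(columns={"A": "a"}, copy=False)
```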
https://api.github.com/repos/pandas-dev/pandas/pulls/48131
2022-08-17T20:11:49Z
2022-09-14T21:31:53Z
null
2022-09-14T21:31:55Z
DEPR: set_axis inplace keyword
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0ceac8aeb9db8..c95660bf86114 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -847,6 +847,7 @@ Other Deprecations - Deprecated the ``inplace`` keyword in :meth:`Categorical.set_ordered`, :meth:`Categorical.as_ordered`, and :meth:`Categorical.as_unordered` (:issue:`37643`) - Deprecated setting a categorical's categories with ``cat.categories = ['a', 'b', 'c']``, use :meth:`Categorical.rename_categories` instead (:issue:`37643`) - Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`) +- Deprecated the ``inplace`` keyword in :meth:`DataFrame.set_axis` and :meth:`Series.set_axis`, use ``obj = obj.set_axis(..., copy=False)`` instead (:issue:`48130`) - Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) - Fixed up warning message of deprecation of :meth:`MultiIndex.lesort_depth` as public method, as the message previously referred to :meth:`MultiIndex.is_lexsorted` instead (:issue:`38701`) - Deprecated the ``sort_columns`` argument in :meth:`DataFrame.plot` and :meth:`Series.plot` (:issue:`47563`). diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7a4f41da5840c..93096126d0682 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5060,7 +5060,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: Literal[False] = ..., + inplace: Literal[False] | lib.NoDefault = ..., copy: bool | lib.NoDefault = ..., ) -> DataFrame: ... @@ -5082,7 +5082,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: bool = ..., + inplace: bool | lib.NoDefault = ..., copy: bool | lib.NoDefault = ..., ) -> DataFrame | None: ... @@ -5111,10 +5111,9 @@ def set_axis( 1 2 5 2 3 6 - Now, update the labels inplace. + Now, update the labels without copying the underlying data. - >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True) - >>> df + >>> df.set_axis(['i', 'ii'], axis='columns', copy=False) i ii 0 1 4 1 2 5 @@ -5132,7 +5131,7 @@ def set_axis( self, labels, axis: Axis = 0, - inplace: bool = False, + inplace: bool | lib.NoDefault = lib.no_default, *, copy: bool | lib.NoDefault = lib.no_default, ): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 45b839f175a88..1cf308f06c7c7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -715,7 +715,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: Literal[False] = ..., + inplace: Literal[False] | lib.NoDefault = ..., copy: bool_t | lib.NoDefault = ..., ) -> NDFrameT: ... @@ -737,7 +737,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: bool_t = ..., + inplace: bool_t | lib.NoDefault = ..., copy: bool_t | lib.NoDefault = ..., ) -> NDFrameT | None: ... @@ -747,7 +747,7 @@ def set_axis( self: NDFrameT, labels, axis: Axis = 0, - inplace: bool_t = False, + inplace: bool_t | lib.NoDefault = lib.no_default, *, copy: bool_t | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: @@ -769,6 +769,8 @@ def set_axis( inplace : bool, default False Whether to return a new %(klass)s instance. + .. deprecated:: 1.5.0 + copy : bool, default True Whether to make a copy of the underlying data. @@ -783,6 +785,17 @@ def set_axis( -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. 
""" + if inplace is not lib.no_default: + warnings.warn( + f"{type(self).__name__}.set_axis 'inplace' keyword is deprecated " + "and will be removed in a future version. Use " + "`obj = obj.set_axis(..., copy=False)` instead", + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) + else: + inplace = False + if inplace: if copy is True: raise ValueError("Cannot specify both inplace=True and copy=True") @@ -795,14 +808,13 @@ def set_axis( @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t, copy: bool_t): - # NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy. if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy) - obj.set_axis(labels, axis=axis, inplace=True) + setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: int, labels: AnyArrayLike | list) -> None: @@ -904,7 +916,7 @@ def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) - return self.set_axis(new_labels, axis=axis, inplace=False) + return self.set_axis(new_labels, axis=axis) def pop(self, item: Hashable) -> Series | Any: result = self[item] @@ -1363,7 +1375,11 @@ def _set_axis_name(self, name, axis=0, inplace=False): inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy() - renamed.set_axis(idx, axis=axis, inplace=True) + if axis == 0: + renamed.index = idx + else: + renamed.columns = idx + if not inplace: return renamed @@ -10205,8 +10221,7 @@ def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] - new_obj.set_axis(shifted_axis, axis=axis, inplace=True) - + new_obj = new_obj.set_axis(shifted_axis, axis=axis, copy=False) return new_obj.__finalize__(self, method="slice_shift") @final @@ -10465,7 +10480,7 @@ def _tz_convert(ax, tz): ax = _tz_convert(ax, tz) result = self.copy(deep=copy) - result = result.set_axis(ax, axis=axis, inplace=False) + result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") @final @@ -10635,7 +10650,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent): ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy) - result = result.set_axis(ax, axis=axis, inplace=False) + result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 9523857925f8f..43316873a2ea7 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1200,21 +1200,22 @@ def _set_result_index_ordered( # set the result index on the passed values object and # return the new object, xref 8046 + obj_axis = self.obj._get_axis(self.axis) + if self.grouper.is_monotonic and not self.grouper.has_dropped_na: # shortcut if we have an already ordered grouper - result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True) + result = result.set_axis(obj_axis, axis=self.axis, copy=False) return result # row order is scrambled => sort the rows by position in original index original_positions = Index(self.grouper.result_ilocs()) - result.set_axis(original_positions, axis=self.axis, inplace=True) + result = 
result.set_axis(original_positions, axis=self.axis, copy=False) result = result.sort_index(axis=self.axis) - obj_axis = self.obj._get_axis(self.axis) if self.grouper.has_dropped_na: # Add back in any missing rows due to dropna - index here is integral # with values referring to the row of the input so can use RangeIndex result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis) - result.set_axis(obj_axis, axis=self.axis, inplace=True) + result = result.set_axis(obj_axis, axis=self.axis, copy=False) return result diff --git a/pandas/core/series.py b/pandas/core/series.py index f55d6a26255a0..13aa12287072c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4980,7 +4980,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: Literal[False] = ..., + inplace: Literal[False] | lib.NoDefault = ..., copy: bool | lib.NoDefault = ..., ) -> Series: ... @@ -5002,7 +5002,7 @@ def set_axis( labels, *, axis: Axis = ..., - inplace: bool = ..., + inplace: bool | lib.NoDefault = ..., copy: bool | lib.NoDefault = ..., ) -> Series | None: ... @@ -5038,7 +5038,7 @@ def set_axis( # type: ignore[override] self, labels, axis: Axis = 0, - inplace: bool = False, + inplace: bool | lib.NoDefault = lib.no_default, copy: bool | lib.NoDefault = lib.no_default, ) -> Series | None: return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy) diff --git a/pandas/tests/frame/methods/test_set_axis.py b/pandas/tests/frame/methods/test_set_axis.py index 67488dff3c335..f105a38e6fdd0 100644 --- a/pandas/tests/frame/methods/test_set_axis.py +++ b/pandas/tests/frame/methods/test_set_axis.py @@ -21,7 +21,9 @@ def test_set_axis(self, obj): expected.index = new_index # inplace=False - result = obj.set_axis(new_index, axis=0, inplace=False) + msg = "set_axis 'inplace' keyword is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = obj.set_axis(new_index, axis=0, inplace=False) tm.assert_equal(expected, result) def test_set_axis_copy(self, obj): @@ -35,7 +37,8 @@ def test_set_axis_copy(self, obj): with pytest.raises( ValueError, match="Cannot specify both inplace=True and copy=True" ): - obj.set_axis(new_index, axis=0, inplace=True, copy=True) + with tm.assert_produces_warning(FutureWarning): + obj.set_axis(new_index, axis=0, inplace=True, copy=True) result = obj.set_axis(new_index, axis=0, copy=True) tm.assert_equal(expected, result) @@ -75,7 +78,8 @@ def test_set_axis_copy(self, obj): ) # Do this last since it alters obj inplace - res = obj.set_axis(new_index, inplace=True, copy=False) + with tm.assert_produces_warning(FutureWarning): + res = obj.set_axis(new_index, inplace=True, copy=False) assert res is None tm.assert_equal(expected, obj) # check we did NOT make a copy @@ -103,7 +107,8 @@ def test_set_axis_inplace_axis(self, axis, obj): expected.columns = new_index result = obj.copy() - result.set_axis(new_index, axis=axis, inplace=True) + with tm.assert_produces_warning(FutureWarning): + result.set_axis(new_index, axis=axis, inplace=True) tm.assert_equal(result, expected) def test_set_axis_unnamed_kwarg_warns(self, obj): @@ -113,7 +118,9 @@ def test_set_axis_unnamed_kwarg_warns(self, obj): expected = obj.copy() expected.index = new_index - with tm.assert_produces_warning(None): + with tm.assert_produces_warning( + FutureWarning, match="set_axis 'inplace' keyword" + ): result = obj.set_axis(new_index, inplace=False) tm.assert_equal(result, expected) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py index 
c83c8e1d568e6..0546534d91399 100644 --- a/pandas/tests/generic/test_duplicate_labels.py +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -429,11 +429,19 @@ def test_inplace_raises(method, frame_only): s.flags.allows_duplicate_labels = False msg = "Cannot specify" + warn_msg = "Series.set_axis 'inplace' keyword" + if "set_axis" in str(method): + warn = FutureWarning + else: + warn = None + with pytest.raises(ValueError, match=msg): - method(df) + with tm.assert_produces_warning(warn, match=warn_msg): + method(df) if not frame_only: with pytest.raises(ValueError, match=msg): - method(s) + with tm.assert_produces_warning(warn, match=warn_msg): + method(s) def test_pickle():
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
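A minimal sketch of the replacement the deprecation message recommends ("Use `obj = obj.set_axis(..., copy=False)` instead"), mirroring the updated docstring example in the diff; the data is illustrative:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})

# Deprecated under this PR:
# df.set_axis(["i", "ii"], axis="columns", inplace=True)  # emits FutureWarning

# Replacement suggested by the warning message: rebind the result;
# copy=False sets the new labels without copying the underlying data
df = df.set_axis(["i", "ii"], axis="columns", copy=False)
```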
https://api.github.com/repos/pandas-dev/pandas/pulls/48130
2022-08-17T19:54:38Z
2022-08-18T16:17:59Z
2022-08-18T16:17:59Z
2022-08-18T16:59:52Z
BUG: Fixed inconsistent offset behavior for series #43784
diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 07d406ae7d779..fce92f2aabc8f 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -127,6 +127,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - Bug in :func:`pandas.infer_freq`, raising ``TypeError`` when inferred on :class:`RangeIndex` (:issue:`47084`) +- Bug where adding a :class:`DateOffset` to a :class:`DatetimeIndex` or :class:`Series` over a Daylight Savings Time boundary would produce an incorrect result (:issue:`43784`) - Timedelta diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7be7381bcb4d1..6d6999d11ed81 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1148,6 +1148,7 @@ cdef class Day(Tick): _td64_unit = "D" _period_dtype_code = PeriodDtypeCode.D _reso = NPY_DATETIMEUNIT.NPY_FR_D + _use_relativedelta = True cdef class Hour(Tick): @@ -1495,6 +1496,7 @@ cdef class BusinessMixin(SingleConstructorOffset): """ Mixin to business types to provide related functions. """ + _use_relativedelta = True cdef readonly: timedelta _offset @@ -2068,6 +2070,7 @@ cdef class WeekOfMonthMixin(SingleConstructorOffset): """ Mixin for methods common to WeekOfMonth and LastWeekOfMonth. """ + _use_relativedelta = True cdef readonly: int weekday, week @@ -2112,6 +2115,7 @@ cdef class YearOffset(SingleConstructorOffset): DateOffset that just needs a month. """ _attributes = tuple(["n", "normalize", "month"]) + _use_relativedelta = True # FIXME(cython#4446): python annotation here gives compile-time errors # _default_month: int @@ -2277,6 +2281,7 @@ cdef class QuarterOffset(SingleConstructorOffset): # FIXME(cython#4446): python annotation here gives compile-time errors # _default_starting_month: int # _from_name_starting_month: int + _use_relativedelta = True cdef readonly: int startingMonth @@ -2448,6 +2453,8 @@ cdef class QuarterBegin(QuarterOffset): # Month-Based Offset Classes cdef class MonthOffset(SingleConstructorOffset): + _use_relativedelta = True + def is_on_offset(self, dt: datetime) -> bool: if self.normalize and not _is_normalized(dt): return False @@ -2548,6 +2555,7 @@ cdef class SemiMonthOffset(SingleConstructorOffset): _default_day_of_month = 15 _min_day_of_month = 2 _attributes = tuple(["n", "normalize", "day_of_month"]) + _use_relativedelta = True cdef readonly: int day_of_month @@ -2750,6 +2758,7 @@ cdef class Week(SingleConstructorOffset): _inc = timedelta(weeks=1) _prefix = "W" _attributes = tuple(["n", "normalize", "weekday"]) + _use_relativedelta = True cdef readonly: object weekday # int or None @@ -3027,6 +3036,7 @@ cdef class LastWeekOfMonth(WeekOfMonthMixin): # Special Offset Classes cdef class FY5253Mixin(SingleConstructorOffset): + _use_relativedelta = True cdef readonly: int startingMonth int weekday @@ -3496,6 +3506,7 @@ cdef class Easter(SingleConstructorOffset): >>> ts + pd.offsets.Easter() Timestamp('2022-04-17 00:00:00') """ + _use_relativedelta = True cpdef __setstate__(self, state): self.n = state.pop("n") diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e96e9b44112d6..5d6941ade2849 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -694,7 +694,10 @@ def _add_offset(self, offset) -> DatetimeArray: assert not isinstance(offset, Tick) if self.tz is not None: - values = self.tz_localize(None) + if not offset._use_relativedelta: + values = self.tz_convert("utc").tz_localize(None) + else: + values = self.tz_localize(None) 
else: values = self @@ -716,7 +719,10 @@ def _add_offset(self, offset) -> DatetimeArray: result = DatetimeArray._simple_new(result, dtype=result.dtype) if self.tz is not None: # FIXME: tz_localize with non-nano - result = result.tz_localize(self.tz) + if not offset._use_relativedelta: + result = result.tz_localize("utc").tz_convert(self.tz) + else: + result = result.tz_localize(self.tz) return result diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py index 9c6d6a686e9a5..9e61a119994ab 100644 --- a/pandas/tests/tseries/offsets/test_dst.py +++ b/pandas/tests/tseries/offsets/test_dst.py @@ -30,6 +30,8 @@ YearEnd, ) +import pandas as pd +import pandas._testing as tm from pandas.tests.tseries.offsets.test_offsets import get_utc_offset_hours from pandas.util.version import Version @@ -228,3 +230,30 @@ def test_nontick_offset_with_ambiguous_time_error(original_dt, target_dt, offset msg = f"Cannot infer dst time from {target_dt}, try using the 'ambiguous' argument" with pytest.raises(pytz.AmbiguousTimeError, match=msg): localized_dt + offset + + +def test_series_dst_addition(): + # GH#43784 + startdates = pd.Series( + [ + Timestamp("2020-10-25", tz="Europe/Berlin"), + Timestamp("2017-03-12", tz="US/Pacific"), + ] + ) + offset1 = DateOffset(hours=3) + offset2 = DateOffset(days=1) + + expected1 = pd.Series( + [Timestamp("2020-10-25 02:00:00+01:00"), Timestamp("2017-03-12 04:00:00-07:00")] + ) + + expected2 = pd.Series( + [Timestamp("2020-10-26 00:00:00+01:00"), Timestamp("2017-03-13 00:00:00-07:00")] + ) + + result1 = startdates + offset1 + result2 = startdates + offset2 + + tm.assert_series_equal(result1, expected1) + + tm.assert_series_equal(result2, expected2)
- [ ] closes #43784
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
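The behavior change is easiest to see at a DST transition. This sketch reproduces the expectations from `test_series_dst_addition` in the diff; note the PR was closed unmerged (`merged_at` is null), so released pandas may behave differently:

```python
import pandas as pd

# DST ends in Europe/Berlin during 2020-10-25 (03:00 CEST -> 02:00 CET)
ser = pd.Series([pd.Timestamp("2020-10-25", tz="Europe/Berlin")])

# Per the test, an offset denoting an absolute duration is applied in UTC,
# so adding three hours crosses the transition correctly:
print(ser + pd.DateOffset(hours=3))  # expected: 2020-10-25 02:00:00+01:00

# Calendar-style offsets keep wall-clock (relativedelta) semantics:
print(ser + pd.DateOffset(days=1))   # expected: 2020-10-26 00:00:00+01:00
```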
https://api.github.com/repos/pandas-dev/pandas/pulls/48129
2022-08-17T17:37:35Z
2022-11-17T01:31:00Z
null
2022-11-17T01:31:00Z
DEPR: positional arguments of read/to_stata
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0ceac8aeb9db8..bad5b15bf45d6 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -850,6 +850,7 @@ Other Deprecations - Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) - Fixed up warning message of deprecation of :meth:`MultiIndex.lesort_depth` as public method, as the message previously referred to :meth:`MultiIndex.is_lexsorted` instead (:issue:`38701`) - Deprecated the ``sort_columns`` argument in :meth:`DataFrame.plot` and :meth:`Series.plot` (:issue:`47563`). +- Deprecated positional arguments for all but the first argument of :meth:`DataFrame.to_stata` and :func:`read_stata`, use keyword arguments instead (:issue:`48128`). .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7a4f41da5840c..8eb43d95ea1ec 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2576,6 +2576,7 @@ def _from_arrays( compression_options=_shared_docs["compression_options"] % "path", ) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") + @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "path"]) def to_stata( self, path: FilePath | WriteBuffer[bytes], diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e59b6c8770389..eb3993d13b7b0 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -49,6 +49,7 @@ ) from pandas.util._decorators import ( Appender, + deprecate_nonkeyword_arguments, doc, ) @@ -1980,6 +1981,7 @@ def value_labels(self) -> dict[str, dict[float, str]]: @Appender(_read_stata_doc) +@deprecate_nonkeyword_arguments(version=None, allowed_args=["filepath_or_buffer"]) def read_stata( filepath_or_buffer: FilePath | ReadBuffer[bytes], convert_dates: bool = True, diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index f06bf0035c7dc..6e8c415825c89 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -285,7 +285,7 @@ def test_read_write_dta5(self): original.index.name = "index" with tm.ensure_clean() as path: - original.to_stata(path, None) + original.to_stata(path, convert_dates=None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index("index"), original) @@ -297,7 +297,7 @@ def test_write_dta6(self, datapath): original["quarter"] = original["quarter"].astype(np.int32) with tm.ensure_clean() as path: - original.to_stata(path, None) + original.to_stata(path, convert_dates=None) written_and_read_again = self.read_dta(path) tm.assert_frame_equal( written_and_read_again.set_index("index"), @@ -317,7 +317,7 @@ def test_read_write_dta10(self, version): original["integer"] = original["integer"].astype(np.int32) with tm.ensure_clean() as path: - original.to_stata(path, {"datetime": "tc"}, version=version) + original.to_stata(path, convert_dates={"datetime": "tc"}, version=version) written_and_read_again = self.read_dta(path) # original.index is np.int32, read index is np.int64 tm.assert_frame_equal( @@ -377,7 +377,7 @@ def test_read_write_dta11(self): with tm.ensure_clean() as path: with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path, None) + original.to_stata(path, convert_dates=None) written_and_read_again = self.read_dta(path) 
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted) @@ -412,7 +412,7 @@ def test_read_write_dta12(self, version): with tm.ensure_clean() as path: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", InvalidColumnName) - original.to_stata(path, None, version=version) + original.to_stata(path, convert_dates=None, version=version) # should get a warning for that format. assert len(w) == 1 @@ -453,7 +453,7 @@ def test_read_write_reread_dta14(self, file, parsed_114, version, datapath): tm.assert_frame_equal(parsed_114, parsed) with tm.ensure_clean() as path: - parsed_114.to_stata(path, {"date_td": "td"}, version=version) + parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index("index"), parsed_114) @@ -573,7 +573,7 @@ def test_dates_invalid_column(self): original.index.name = "index" with tm.ensure_clean() as path: with tm.assert_produces_warning(InvalidColumnName): - original.to_stata(path, {0: "tc"}) + original.to_stata(path, convert_dates={0: "tc"}) written_and_read_again = self.read_dta(path) modified = original.copy() @@ -623,7 +623,7 @@ def test_date_export_formats(self): expected = DataFrame([expected_values], columns=columns) expected.index.name = "index" with tm.ensure_clean() as path: - original.to_stata(path, conversions) + original.to_stata(path, convert_dates=conversions) written_and_read_again = self.read_dta(path) tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
- [ ] closes #xxxx (Replace xxxx with the Github issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
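A short sketch of the calling convention this deprecation enforces, following the test updates in the diff; the file name is illustrative:

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3]})

# Deprecated under this PR: passing anything but the path positionally
# df.to_stata("data.dta", None)  # emits FutureWarning

# Keyword form the deprecation points users toward
df.to_stata("data.dta", convert_dates=None)
result = pd.read_stata("data.dta")
```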
https://api.github.com/repos/pandas-dev/pandas/pulls/48128
2022-08-17T17:33:25Z
2022-08-17T21:15:14Z
2022-08-17T21:15:14Z
2022-09-21T15:29:01Z
POC/REF: remove axes from Managers
diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx index 3354290a5f535..e6cbfb2c11017 100644 --- a/pandas/_libs/properties.pyx +++ b/pandas/_libs/properties.pyx @@ -61,9 +61,10 @@ cdef class AxisProperty: if obj is None: # Only instances have _mgr, not classes return self + if self.axis == 0: + return obj._index else: - axes = obj._mgr.axes - return axes[self.axis] + return obj._columns def __set__(self, obj, value): obj._set_axis(self.axis, value) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 48822d9d01ddb..3faac6858e7af 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -726,7 +726,8 @@ def apply(self) -> DataFrame | Series: with np.errstate(all="ignore"): results = self.obj._mgr.apply("apply", func=self.f) # _constructor will retain self.index and self.columns - return self.obj._constructor(data=results) + axes_dict = self.obj._construct_axes_dict() + return self.obj._constructor(data=results, **axes_dict) # broadcasting if self.result_type == "broadcast": @@ -1001,6 +1002,7 @@ def series_generator(self): # We create one Series object, and will swap out the data inside # of it. Kids: don't do this at home. ser = self.obj._ixs(0, axis=0) + index = ser.index mgr = ser._mgr if is_extension_array_dtype(ser.dtype): @@ -1012,9 +1014,10 @@ def series_generator(self): else: for (arr, name) in zip(values, self.index): - # GH#35462 re-pin mgr in case setitem changed it + # GH#35462 re-pin mgr, index in case setitem changed it ser._mgr = mgr mgr.set_values(arr) + ser._index = index object.__setattr__(ser, "_name", name) yield ser diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index 4e8e4ea7e8d87..871eb187802f0 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -358,6 +358,8 @@ def _reconstruct(result): return result if isinstance(result, BlockManager): # we went through BlockManager.apply e.g. np.sqrt + # TODO: any cases that aren't index/columns-preserving? + reconstruct_kwargs.update(self._construct_axes_dict()) result = self._constructor(result, **reconstruct_kwargs, copy=False) else: # we converted an array, lost our axes diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 22ccd1d763769..9808b4def6e6f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -590,7 +590,7 @@ class DataFrame(NDFrame, OpsMixin): 2 2 3 """ - _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set + _internal_names_set = {"_columns", "columns", "_index", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} @@ -621,11 +621,25 @@ def __init__( dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): + if index is None: + index = data.index + if columns is None: + columns = data.columns data = data._mgr if isinstance(data, (BlockManager, ArrayManager)): # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) + if index is None or columns is None: + assert False + if data.axes[0] is not columns or data.axes[1] is not index: + # FIXME: without this check, json tests segfault... 
+ # nope, segfaults even with this check + data.axes = [ensure_index(columns), ensure_index(index)] + #if not index.equals(data.axes[-1]):#index is not data.axes[-1]: + # assert False + #if not columns.equals(data.axes[0]):#columns is not data.axes[0]: + # assert False if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) @@ -751,7 +765,7 @@ def __init__( index, # type: ignore[arg-type] dtype, ) - mgr = arrays_to_mgr( + mgr, _, _ = arrays_to_mgr( arrays, columns, index, @@ -794,7 +808,7 @@ def __init__( construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] - mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) + mgr, _, _ = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, @@ -2399,9 +2413,9 @@ def maybe_reorder( columns = columns.drop(exclude) manager = get_option("mode.data_manager") - mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) + mgr, index, columns = arrays_to_mgr(arrays, columns, result_index, typ=manager) - return cls(mgr) + return cls(mgr, index=index, columns=columns) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None @@ -2603,7 +2617,7 @@ def _from_arrays( columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") - mgr = arrays_to_mgr( + mgr, index, columns = arrays_to_mgr( arrays, columns, index, @@ -2611,7 +2625,7 @@ def _from_arrays( verify_integrity=verify_integrity, typ=manager, ) - return cls(mgr) + return cls(mgr, index=index, columns=columns) @doc( storage_options=_shared_docs["storage_options"], @@ -3729,7 +3743,7 @@ def _ixs(self, i: int, axis: int = 0) -> Series: # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None - result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( + result = self._constructor_sliced(new_mgr, index=self.columns, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) @@ -4154,6 +4168,7 @@ def _set_item_mgr(self, key, value: ArrayLike) -> None: except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) + self._columns = self.columns.insert(len(self._info_axis), key) else: self._iset_item_mgr(loc, value) @@ -4257,6 +4272,7 @@ def _ensure_valid_index(self, value) -> None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) + self._index = index_copy def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ @@ -4267,7 +4283,7 @@ def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager - return klass(values, name=name, fastpath=True).__finalize__(self) + return klass(values, name=name, index=self.index, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching @@ -4486,6 +4502,8 @@ def query(self, expr: str, inplace: bool = False, **kwargs) -> DataFrame | None: if inplace: self._update_inplace(result) + self._index = result._index + self._columns = result._columns return None else: return result @@ -4754,8 +4772,10 @@ def predicate(arr: ArrayLike) -> bool: return True - mgr = self._mgr._get_data_subset(predicate).copy(deep=None) - return 
type(self)(mgr).__finalize__(self) + mgr, taker = self._mgr._get_data_subset(predicate) + mgr = mgr.copy(deep=None) + columns = self.columns.take(taker) + return type(self)(mgr, columns=columns, index=self.index).__finalize__(self) def insert( self, @@ -4824,6 +4844,7 @@ def insert( value = self._sanitize_column(value) self._mgr.insert(loc, column, value) + self._columns = self.columns.insert(loc, column) def assign(self, **kwargs) -> DataFrame: r""" @@ -5855,7 +5876,7 @@ def shift( fill_value=fill_value, allow_dups=True, ) - res_df = self._constructor(mgr) + res_df = self._constructor(mgr, columns=self.columns, index=self.index) return res_df.__finalize__(self, method="shift") return super().shift( @@ -6382,7 +6403,8 @@ class max type @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isna(self) -> DataFrame: - result = self._constructor(self._mgr.isna(func=isna)) + axes_dict = self._construct_axes_dict() + result = self._constructor(self._mgr.isna(func=isna), **axes_dict) return result.__finalize__(self, method="isna") @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) @@ -6587,6 +6609,8 @@ def dropna( if not inplace: return result self._update_inplace(result) + self._columns = result._columns + self._index = result._index return None @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "subset"]) @@ -6685,6 +6709,8 @@ def drop_duplicates( if inplace: self._update_inplace(result) + self._index = result._index + self._columns = result._columns return None else: return result @@ -6934,16 +6960,27 @@ def sort_values( # type: ignore[override] else: return self.copy() + bm_axis = self._get_block_manager_axis(axis) + new_data = self._mgr.take( - indexer, axis=self._get_block_manager_axis(axis), verify=False + indexer, axis=bm_axis, verify=False ) + axis_name = self._get_axis_name(axis) + + axes_dict = {} + axes_dict[axis_name] = self.axes[axis].take(indexer) + if axis == 0: + axes_dict["columns"] = self.columns + else: + axes_dict["index"] = self.index + if ignore_index: - new_data.set_axis( - self._get_block_manager_axis(axis), default_index(len(indexer)) - ) + rng = default_index(len(indexer)) + new_data.set_axis(bm_axis, rng) + axes_dict[axis_name] = rng - result = self._constructor(new_data) + result = self._constructor(new_data, **axes_dict) if inplace: return self._update_inplace(result) else: @@ -7627,7 +7664,7 @@ def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None): # i.e. 
scalar, faster than checking np.ndim(right) == 0 with np.errstate(all="ignore"): bm = self._mgr.apply(array_op, right=right) - return self._constructor(bm) + return self._constructor(bm, index=self.index, columns=self.columns) elif isinstance(right, DataFrame): assert self.index.equals(right.index) @@ -7648,7 +7685,7 @@ def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None): right._mgr, # type: ignore[arg-type] array_op, ) - return self._constructor(bm) + return self._constructor(bm, index=self.index, columns=self.columns) elif isinstance(right, Series) and axis == 1: # axis=1 means we want to operate row-by-row @@ -9239,7 +9276,7 @@ def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: axis = 0 new_data = self._mgr.diff(n=periods, axis=axis) - return self._constructor(new_data).__finalize__(self, "diff") + return self._constructor(new_data, index=self.index, columns=self.columns).__finalize__(self, "diff") # ---------------------------------------------------------------------- # Function application @@ -10850,8 +10887,9 @@ def _reduce( # cols = self.columns[~dt64_cols] # self = self[cols] predicate = lambda x: not is_datetime64_any_dtype(x.dtype) - mgr = self._mgr._get_data_subset(predicate) - self = type(self)(mgr) + mgr, taker = self._mgr._get_data_subset(predicate) + columns = self.columns[taker] + self = type(self)(mgr, index=self.index, columns=columns) # TODO: Make other agg func handle axis=None properly GH#21597 axis = self._get_axis_number(axis) @@ -10900,7 +10938,20 @@ def _get_data() -> DataFrame: # After possibly _get_data and transposing, we are now in the # simple case where we can use BlockManager.reduce res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures) - out = df._constructor(res).iloc[0] + index = Index([None], dtype=object) + assert index.equals(res.axes[1]) + if ignore_failures: + if len(res.items) == len(df.columns): + # i.e. nothing was dropped + columns = df.columns + else: + # FIXME: get axes without mgr.axes; THIS IS WRONG TOO + columns = res.axes[0] + else: + columns = df.columns + assert columns.equals(res.axes[0]) + + out = df._constructor(res, index=index, columns=columns).iloc[0] if out_dtype is not None: out = out.astype(out_dtype) if axis == 0 and len(self) == 0 and name in ["sum", "prod"]: @@ -11398,7 +11449,8 @@ def quantile( res = data._mgr.take(indexer[q_idx], verify=False) res.axes[1] = q - result = self._constructor(res) + # FIXME: get axes without mgr.axes + result = self._constructor(res, columns=res.axes[0], index=res.axes[1]) return result.__finalize__(self, method="quantile") @doc(NDFrame.asfreq, **_shared_doc_kwargs) @@ -11665,9 +11717,9 @@ def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame: _info_axis_name = "columns" index = properties.AxisProperty( - axis=1, doc="The index (row labels) of the DataFrame." + axis=0, doc="The index (row labels) of the DataFrame." 
) - columns = properties.AxisProperty(axis=0, doc="The column labels of the DataFrame.") + columns = properties.AxisProperty(axis=1, doc="The column labels of the DataFrame.") @property def _AXIS_NUMBERS(self) -> dict[str, int]: @@ -11702,8 +11754,9 @@ def _to_dict_of_blocks(self, copy: bool = True): # convert to BlockManager if needed -> this way support ArrayManager as well mgr = mgr_to_mgr(mgr, "block") mgr = cast(BlockManager, mgr) + # FIXME: get axes without mgr.axes return { - k: self._constructor(v).__finalize__(self) + k: self._constructor(v, index=self.index, columns=v.axes[0]).__finalize__(self) for k, v, in mgr.to_dict(copy=copy).items() } diff --git a/pandas/core/generic.py b/pandas/core/generic.py index aa9845a2abb78..f744d0a2cc7a6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -32,7 +32,7 @@ import numpy as np from pandas._config import config - +from pandas.core.indexers import maybe_convert_indices from pandas._libs import lib from pandas._libs.tslibs import ( Period, @@ -276,6 +276,13 @@ def __init__( object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) + # FIXME: get axes without data.axes + if self.ndim == 1: + object.__setattr__(self, "_index", data.axes[0]) + else: + object.__setattr__(self, "_index", data.axes[1]) + object.__setattr__(self, "_columns", data.axes[0]) + @classmethod def _init_mgr( cls, @@ -820,8 +827,29 @@ def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool_t, copy: bool_t): def _set_axis(self, axis: int, labels: AnyArrayLike | list) -> None: labels = ensure_index(labels) - self._mgr.set_axis(axis, labels) + self._validate_set_axis(axis, labels) self._clear_item_cache() + if axis == 0: + object.__setattr__(self, "_index", labels) + else: + object.__setattr__(self, "_columns", labels) + + @final + def _validate_set_axis(self, axis: int, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + old_len = self.shape[axis] + new_len = len(new_labels) + + if self.ndim > 1 and axis == 0 and len(self.columns) == 0: + # If we are setting the index on a DataFrame with no columns, + # it is OK to change the length. 
+ pass + + elif new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) @final def swapaxes( @@ -1475,6 +1503,10 @@ def equals(self, other: object) -> bool_t: if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) + if self.ndim != other.ndim: + return False + if not all(left.equals(right) for left, right in zip(self.axes, other.axes)): + return False return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- @@ -1495,7 +1527,8 @@ def blk_func(values: ArrayLike): return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) - res = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + res = self._constructor(new_data, **axes_dict) return res.__finalize__(self, method="__neg__") @final @@ -1510,7 +1543,8 @@ def blk_func(values: ArrayLike): return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) - res = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + res = self._constructor(new_data, **axes_dict) return res.__finalize__(self, method="__pos__") @final @@ -1520,7 +1554,8 @@ def __invert__(self: NDFrameT) -> NDFrameT: return self new_data = self._mgr.apply(operator.invert) - return self._constructor(new_data).__finalize__(self, method="__invert__") + axes_dict = self._construct_axes_dict() + return self._constructor(new_data, **axes_dict).__finalize__(self, method="__invert__") @final def __nonzero__(self) -> NoReturn: @@ -1647,7 +1682,8 @@ def abs(self: NDFrameT) -> NDFrameT: 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) - return self._constructor(res_mgr).__finalize__(self, name="abs") + axes_dict = self._construct_axes_dict() + return self._constructor(res_mgr, **axes_dict).__finalize__(self, name="abs") @final def __abs__(self: NDFrameT) -> NDFrameT: @@ -2118,6 +2154,11 @@ def __array_ufunc__( @final def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} + + # TODO: handle unpickling older pickles where index/columns are in mgr + meta["_index"] = self.index + if self.ndim == 2: + meta["_columns"] = self.columns return { "_mgr": self._mgr, "_typ": self._typ, @@ -3891,7 +3932,22 @@ def _take( verify=True, convert_indices=convert_indices, ) - return self._constructor(new_data).__finalize__(self, method="take") + + # We have 6 tests that get here with a slice; TODO: maybe avoid? + # TODO: de-duplicate with similar inside BlockManager.take + indices = ( + np.arange(indices.start, indices.stop, indices.step, dtype=np.intp) + if isinstance(indices, slice) + else np.asanyarray(indices, dtype=np.intp) # <- converts some cases with empty float64 + ) + + axes_dict = self._construct_axes_dict() + if convert_indices and isinstance(indices, np.ndarray): + # i.e. 
exclude slice, which in principle shouldn't be in a _take + indices = maybe_convert_indices(indices, len(self.axes[axis]), verify=True) + axes_dict[self._get_axis_name(axis)] = self.axes[axis].take(indices)#[indices] + + return self._constructor(new_data, **axes_dict).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis=0) -> NDFrameT: """ @@ -4078,7 +4134,7 @@ class animal locomotion new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( - new_mgr, name=self.index[loc] + new_mgr, name=self.index[loc], index=self.columns ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] @@ -4103,8 +4159,16 @@ def _slice(self: NDFrameT, slobj: slice, axis=0) -> NDFrameT: Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) - axis = self._get_block_manager_axis(axis) - result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) + + axis_name = self._get_axis_name(axis) + new_idx = self.axes[axis][slobj] + axes_dict = self._construct_axes_dict() + axes_dict[axis_name] = new_idx + + bm_axis = self._get_block_manager_axis(axis) + new_mgr = self._mgr.get_slice(slobj, axis=bm_axis) + + result = self._constructor(new_mgr, **axes_dict) result = result.__finalize__(self) # this could be a view @@ -4113,6 +4177,14 @@ def _slice(self: NDFrameT, slobj: slice, axis=0) -> NDFrameT: result._set_is_copy(self, copy=is_copy) return result + @staticmethod + def _get_axes_from_mgr(mgr): + axes_dict = {} + axes_dict["index"] = mgr.axes[-1] + if mgr.ndim == 2: + axes_dict["columns"] = mgr.axes[0] + return axes_dict + @final def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: @@ -4246,9 +4318,20 @@ def __delitem__(self, key) -> None: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: + + # make sure we access self.shape before calling mgr.idelete + is_deleted = np.zeros(self.shape[-1], dtype=np.bool_) + loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) + is_deleted[loc] = True + new_items = self.axes[-1][~is_deleted] + if self.ndim == 1: + self._index = new_items + else: + self._columns = new_items + # delete from the caches try: del self._item_cache[key] @@ -4512,6 +4595,9 @@ def drop( if inplace: self._update_inplace(obj) + self._index = obj._index + if self.ndim > 1: + self._columns = obj._columns else: return obj @@ -4587,6 +4673,8 @@ def _drop_axis( indexer = mask.nonzero()[0] new_axis = axis.take(indexer) + axes_dict = self._construct_axes_dict() + axes_dict[self._get_axis_name(axis_num)] = new_axis bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, @@ -4595,11 +4683,12 @@ def _drop_axis( allow_dups=True, only_slice=only_slice, ) - result = self._constructor(new_mgr) + result = self._constructor(new_mgr, **axes_dict) if self.ndim == 1: result.name = self.name - return result.__finalize__(self) + out = result.__finalize__(self) + return out @final def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: @@ -4617,6 +4706,9 @@ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: self._reset_cache() self._clear_item_cache() self._mgr = result._mgr + self._index = result._index + if self.ndim == 2: + self._columns = result._columns self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) @final @@ -5025,6 +5117,7 @@ def sort_index( inplace = validate_bool_kwarg(inplace, "inplace") axis = 
self._get_axis_number(axis) + orig_axis = axis ascending = validate_ascending(ascending) target = self._get_axis(axis) @@ -5049,17 +5142,25 @@ def sort_index( baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) + axis_name = self._get_axis_name(axis) + axes_dict = self._construct_axes_dict() + axes_dict[axis_name] = self.axes[axis].take(indexer)._sort_levels_monotonic() + # reconstruct axis if needed - new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) + new_data.set_axis(baxis, axes_dict[axis_name]) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 - new_data.set_axis(axis, default_index(len(indexer))) + rng = default_index(len(indexer)) + new_data.set_axis(axis, rng) + + name = "columns" if orig_axis == 1 else "index" + axes_dict[name] = rng - result = self._constructor(new_data) + result = self._constructor(new_data, **axes_dict) if inplace: - return self._update_inplace(result) + self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") @@ -5366,6 +5467,9 @@ def _reindex_with_indexers( ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated + axes_dict = self._construct_axes_dict() + axes_dict = {x: axes_dict[x].copy(deep=False) for x in axes_dict} + new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] @@ -5389,11 +5493,12 @@ def _reindex_with_indexers( ) # If we've made a copy once, no need to make another one copy = False + axes_dict[self._get_axis_name(axis)] = index if copy and new_data is self._mgr: new_data = new_data.copy() - return self._constructor(new_data).__finalize__(self) + return self._constructor(new_data, **axes_dict).__finalize__(self) def filter( self: NDFrameT, @@ -6018,7 +6123,8 @@ def _consolidate(self): """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) - return self._constructor(cons_data).__finalize__(self) + axes_dict = self._construct_axes_dict() + return self._constructor(cons_data, **axes_dict).__finalize__(self) @property def _is_mixed_type(self) -> bool_t: @@ -6050,11 +6156,17 @@ def _check_inplace_setting(self, value) -> bool_t: @final def _get_numeric_data(self: NDFrameT) -> NDFrameT: - return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) + mgr, taker = self._mgr.get_numeric_data() + axes_dict = self._construct_axes_dict() + axes_dict[self._get_axis_name(self.ndim-1)] = self.axes[-1].take(taker) + return self._constructor(mgr, **axes_dict).__finalize__(self) @final def _get_bool_data(self): - return self._constructor(self._mgr.get_bool_data()).__finalize__(self) + mgr, taker = self._mgr.get_bool_data() + axes_dict = self._construct_axes_dict() + axes_dict[self._get_axis_name(self.ndim-1)] = self.axes[-1].take(taker) + return self._constructor(mgr, **axes_dict).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods @@ -6264,7 +6376,8 @@ def astype( else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) - return self._constructor(new_data).__finalize__(self, method="astype") + axes_dict = self._construct_axes_dict() + return self._constructor(new_data, **axes_dict).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: @@ -6393,7 +6506,10 @@ def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ data = 
self._mgr.copy(deep=deep) self._clear_item_cache() - return self._constructor(data).__finalize__(self, method="copy") + axes_dict = self._construct_axes_dict() + # TODO: probably need to do this copy elsewhere? + axes_dict = {x: axes_dict[x].copy(deep=False) for x in axes_dict} + return self._constructor(data, **axes_dict).__finalize__(self, method="copy") @final def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: @@ -6436,13 +6552,15 @@ def _convert( validate_bool_kwarg(datetime, "datetime") validate_bool_kwarg(numeric, "numeric") validate_bool_kwarg(timedelta, "timedelta") + axes_dict = self._construct_axes_dict() return self._constructor( self._mgr.convert( datetime=datetime, numeric=numeric, timedelta=timedelta, copy=True, - ) + ), + **axes_dict ).__finalize__(self) @final @@ -6487,8 +6605,10 @@ def infer_objects(self: NDFrameT) -> NDFrameT: # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types + axes_dict = self._construct_axes_dict() return self._constructor( - self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True) + self._mgr.convert(datetime=True, numeric=False, timedelta=True, copy=True), + **axes_dict, ).__finalize__(self, method="infer_objects") @final @@ -6954,7 +7074,8 @@ def fillna( else: raise ValueError(f"invalid fill value with a {type(value)}") - result = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + result = self._constructor(new_data, **axes_dict) if inplace: return self._update_inplace(result) else: @@ -7311,7 +7432,8 @@ def replace( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) - result = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + result = self._constructor(new_data, **axes_dict) if inplace: return self._update_inplace(result) else: @@ -7602,8 +7724,8 @@ def interpolate( downcast=downcast, **kwargs, ) - - result = self._constructor(new_data) + axes_dict = obj._construct_axes_dict() + result = self._constructor(new_data, **axes_dict) if should_transpose: result = result.T if inplace: @@ -9591,8 +9713,9 @@ def _align_series( elif lidx is None or join_index is None: left = self.copy() if copy else self else: + new_mgr = self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) left = self._constructor( - self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) + new_mgr, columns=self.columns, index=join_index ) right = other._reindex_indexer(join_index, ridx, copy) @@ -9615,7 +9738,9 @@ def _align_series( if copy and fdata is self._mgr: fdata = fdata.copy() - left = self._constructor(fdata) + axes_dict = self._construct_axes_dict() + axes_dict["columns"] = join_index + left = self._constructor(fdata, **axes_dict) if ridx is None: right = other @@ -9757,7 +9882,8 @@ def _where( self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) - result = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + result = self._constructor(new_data, **axes_dict) return self._update_inplace(result) else: @@ -9766,7 +9892,8 @@ def _where( cond=cond, align=align, ) - result = self._constructor(new_data) + axes_dict = self._construct_axes_dict() + result = self._constructor(new_data, **axes_dict) return result.__finalize__(self) @overload @@ -10198,7 +10325,8 @@ def shift( new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) - return self._constructor(new_data).__finalize__(self, method="shift") + axes_dict = 
self._construct_axes_dict() + return self._constructor(new_data, **axes_dict).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) @@ -11245,8 +11373,8 @@ def block_accum_func(blk_values): return result result = self._mgr.apply(block_accum_func) - - return self._constructor(result).__finalize__(self, method=name) + axes_dict = self._construct_axes_dict() + return self._constructor(result, **axes_dict).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 33f3ffa34489e..7326a79a9654b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -175,7 +175,10 @@ def _wrap_agged_manager(self, mgr: Manager) -> Series: else: mgr = cast(Manager2D, mgr) single = mgr.iget(0) - ser = self.obj._constructor(single, name=self.obj.name) + #breakpoint() + # FIXME: get axes without mgr.axes + index = single.axes[0] + ser = self.obj._constructor(single, index=index, name=self.obj.name) # NB: caller is responsible for setting ser.index return ser @@ -1158,7 +1161,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame: for i, (item, sgb) in enumerate(self._iterate_column_groupbys(obj)): result[i] = sgb.aggregate(func, *args, **kwargs) - res_df = self.obj._constructor(result) + res_df = self.obj._constructor(result, columns=obj.columns) res_df.columns = obj.columns return res_df @@ -1318,7 +1321,7 @@ def _cython_transform( mgr: Manager2D = self._get_data_to_aggregate() orig_mgr_len = len(mgr) if numeric_only_bool: - mgr = mgr.get_numeric_data(copy=False) + mgr = mgr.get_numeric_data(copy=False)[0] def arr_func(bvalues: ArrayLike) -> ArrayLike: return self.grouper._cython_operation( @@ -1327,13 +1330,26 @@ def arr_func(bvalues: ArrayLike) -> ArrayLike: # We could use `mgr.apply` here and not have to set_axis, but # we would have to do shape gymnastics for ArrayManager compat - res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True) + res_mgr, taker = mgr.grouped_reduce(arr_func, ignore_failures=True) res_mgr.set_axis(1, mgr.axes[1]) if len(res_mgr) < orig_mgr_len: warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only) - res_df = self.obj._constructor(res_mgr) + columns = mgr.axes[0] + index = res_mgr.axes[1] # FIXME: get index without res_mgr.axes + if self.axis == 0: + + pass#index = self._obj_with_exclusions.index + #columns = columns[taker] + #breakpoint() + else: + #columns = self._obj_with_exclusions.index + pass#index = self._obj_with_exclusions.columns + #breakpoint() + + columns = columns[taker] + res_df = self.obj._constructor(res_mgr, index=index, columns=columns) if self.axis == 1: res_df = res_df.T return res_df @@ -1654,14 +1670,16 @@ def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: rows = mgr.shape[1] if mgr.shape[0] > 0 else 0 index = Index(range(rows)) mgr.set_axis(1, index) - result = self.obj._constructor(mgr) + # FIXME: get axes without mgr.axes + result = self.obj._constructor(mgr, index=index, columns=mgr.axes[0]) self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: index = self.grouper.result_index mgr.set_axis(1, index) - result = self.obj._constructor(mgr) + # FIXME: get axes without mgr.axes + result = self.obj._constructor(mgr, index=index, columns=mgr.axes[0]) if self.axis == 1: result = result.T diff --git a/pandas/core/groupby/groupby.py 
b/pandas/core/groupby/groupby.py index b963b85b93a31..ff46904758025 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1757,7 +1757,7 @@ def _cython_agg_general( f"{type(self).__name__}.{how} does not implement {kwd_name}." ) elif not is_ser: - data = data.get_numeric_data(copy=False) + data = data.get_numeric_data(copy=False)[0] def array_func(values: ArrayLike) -> ArrayLike: try: @@ -1780,7 +1780,7 @@ def array_func(values: ArrayLike) -> ArrayLike: # TypeError -> we may have an exception in trying to aggregate # continue and exclude the block - new_mgr = data.grouped_reduce(array_func, ignore_failures=ignore_failures) + new_mgr, taker = data.grouped_reduce(array_func, ignore_failures=ignore_failures) if not is_ser and len(new_mgr) < orig_len: warn_dropping_nuisance_columns_deprecated(type(self), how, numeric_only) @@ -2055,7 +2055,7 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike: return counted[0] return counted - new_mgr = data.grouped_reduce(hfunc) + new_mgr, taker = data.grouped_reduce(hfunc) # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in @@ -2906,7 +2906,8 @@ def blk_func(values: ArrayLike) -> ArrayLike: mgr = obj._mgr res_mgr = mgr.apply(blk_func) - new_obj = obj._constructor(res_mgr) + axes_dict = obj._construct_axes_dict() + new_obj = obj._constructor(res_mgr, **axes_dict) if isinstance(new_obj, Series): new_obj.name = obj.name @@ -3371,9 +3372,9 @@ def blk_func(values: ArrayLike) -> ArrayLike: obj = self._obj_with_exclusions is_ser = obj.ndim == 1 mgr = self._get_data_to_aggregate() - data = mgr.get_numeric_data() if numeric_only_bool else mgr + data = mgr.get_numeric_data()[0] if numeric_only_bool else mgr ignore_failures = numeric_only_bool - res_mgr = data.grouped_reduce(blk_func, ignore_failures=ignore_failures) + res_mgr, taker = data.grouped_reduce(blk_func, ignore_failures=ignore_failures) if ( numeric_only is lib.no_default @@ -3395,7 +3396,13 @@ def blk_func(values: ArrayLike) -> ArrayLike: if is_ser: res = self._wrap_agged_manager(res_mgr) else: - res = obj._constructor(res_mgr) + # FIXME: get axes without mgr.axes + axes_dict = {} + axes_dict["index"] = res_mgr.axes[-1] + if res_mgr.ndim == 2: + axes_dict["columns"] = res_mgr.axes[0] + #breakpoint() + res = obj._constructor(res_mgr, **axes_dict) if orig_scalar: # Avoid expensive MultiIndex construction @@ -3687,7 +3694,7 @@ def cummin(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT: skipna = kwargs.get("skipna", True) if axis != 0: f = lambda x: np.minimum.accumulate(x, axis) - numeric_only_bool = self._resolve_numeric_only("cummax", numeric_only, axis) + numeric_only_bool = self._resolve_numeric_only("cummax", numeric_only, axis) # TODO: "cummin"? 
obj = self._selected_obj if numeric_only_bool: obj = obj._get_numeric_data() @@ -3845,9 +3852,9 @@ def blk_func(values: ArrayLike) -> ArrayLike: orig_mgr_len = len(mgr) if numeric_only_bool: - mgr = mgr.get_numeric_data() + mgr = mgr.get_numeric_data()[0] - res_mgr = mgr.grouped_reduce(blk_func, ignore_failures=True) + res_mgr, taker = mgr.grouped_reduce(blk_func, ignore_failures=True) if not is_ser and len(res_mgr.items) != orig_mgr_len: howstr = how.replace("group_", "") @@ -3864,7 +3871,13 @@ def blk_func(values: ArrayLike) -> ArrayLike: if is_ser: out = self._wrap_agged_manager(res_mgr) else: - out = obj._constructor(res_mgr) + # FIXME: get axes without mgr.axes + if self.axis == 0 and not numeric_only_bool: + columns = self._obj_with_exclusions.columns[taker] + else: + #breakpoint() + columns = res_mgr.axes[0] + out = obj._constructor(res_mgr, index=res_mgr.axes[1], columns=columns) return self._wrap_aggregated_output(out) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index ba808e1f2e07f..46440f59adcfc 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -1348,7 +1348,8 @@ class SeriesSplitter(DataSplitter): def _chop(self, sdata: Series, slice_obj: slice) -> Series: # fastpath equivalent to `sdata.iloc[slice_obj]` mgr = sdata._mgr.get_slice(slice_obj) - ser = sdata._constructor(mgr, name=sdata.name, fastpath=True) + index = sdata.index[slice_obj] + ser = sdata._constructor(mgr, index=index, name=sdata.name, fastpath=True) return ser.__finalize__(sdata, method="groupby") @@ -1360,7 +1361,13 @@ def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: # else: # return sdata.iloc[:, slice_obj] mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis) - df = sdata._constructor(mgr) + if self.axis == 0: + index = sdata.index[slice_obj] + columns = sdata.columns + else: + index = sdata.index + columns = sdata.columns[slice_obj] + df = sdata._constructor(mgr, index=index, columns=columns) return df.__finalize__(sdata, method="groupby") diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 52150eafd7783..ef8a3f6aef191 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -828,7 +828,9 @@ def _view(self: _IndexT) -> _IndexT: result = self._simple_new(self._values, name=self._name) result._cache = self._cache + result._id = self._id return result + # TODO: preserve _id? @final def _rename(self: _IndexT, name: Hashable) -> _IndexT: diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index d415cbd035cd1..780dc8695a87b 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -803,6 +803,9 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None) -> None: keys = self.obj.columns.union(key, sort=False) self.obj._mgr = self.obj._mgr.reindex_axis(keys, axis=0, only_slice=True) + assert self.obj._mgr.axes[0].equals(keys) + self.obj._columns = Index(keys) + @final def __setitem__(self, key, value) -> None: @@ -1584,6 +1587,7 @@ def _get_list_axis(self, key, axis: int): return self.obj._take_with_is_copy(key, axis=axis) except IndexError as err: # re-raise with different error message + raise # watch out for case with wrong dtype key? raise IndexError("positional indexers are out-of-bounds") from err def _getitem_axis(self, key, axis: int): @@ -1603,6 +1607,9 @@ def _getitem_axis(self, key, axis: int): if isinstance(key, list): key = np.asarray(key) + #if len(key) == 0: + # key = key.astype(np.intp) + # TODO: if empty, do intp instead of float64? 
if com.is_bool_indexer(key): self._validate_key(key, axis) @@ -1765,8 +1772,14 @@ def _setitem_with_indexer(self, indexer, value, name="iloc"): reindexers, allow_dups=True ) self.obj._mgr = new_obj._mgr + # TODO: use update_inplace? + if i == 0: + self.obj._index = labels + else: + self.obj._columns = labels self.obj._maybe_update_cacher(clear=True) self.obj._is_copy = None + nindexer.append(labels.get_loc(key)) @@ -1988,6 +2001,7 @@ def _setitem_single_column(self, loc: int, value, plane_indexer) -> None: # falling back to casting if necessary) self.obj._mgr.column_setitem(loc, plane_indexer, value) self.obj._clear_item_cache() + return # We will not operate in-place, but will attempt to in the future. @@ -2077,6 +2091,7 @@ def _setitem_single_block(self, indexer, value, name: str) -> None: # actually do the set self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) + # FIXME: update axes? self.obj._maybe_update_cacher(clear=True, inplace=True) def _setitem_with_indexer_missing(self, indexer, value): @@ -2129,8 +2144,10 @@ def _setitem_with_indexer_missing(self, indexer, value): self.obj._mgr = self.obj._constructor( new_values, index=new_index, name=self.obj.name )._mgr + self.obj._index = new_index self.obj._maybe_update_cacher(clear=True) + elif self.ndim == 2: if not len(self.obj.columns): @@ -2172,8 +2189,13 @@ def _setitem_with_indexer_missing(self, indexer, value): # dtype. But if we had a list or dict, then do inference df = df.infer_objects() self.obj._mgr = df._mgr + self.obj._index = df.index + else: - self.obj._mgr = self.obj._append(value)._mgr + new_obj = self.obj._append(value) + self.obj._mgr = new_obj._mgr + self.obj._index = new_obj.index + self.obj._maybe_update_cacher(clear=True) def _ensure_iterable_column_indexer(self, column_indexer): diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 53f8486074ef9..9e0e832b8f3a5 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -464,7 +464,7 @@ def is_view(self) -> bool: def is_single_block(self) -> bool: return len(self.arrays) == 1 - def _get_data_subset(self: T, predicate: Callable) -> T: + def _get_data_subset(self: T, predicate: Callable) -> tuple[T, npt.NDArray[np.intp]]: indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)] arrays = [self.arrays[i] for i in indices] # TODO copy? @@ -473,9 +473,9 @@ def _get_data_subset(self: T, predicate: Callable) -> T: taker = np.array(indices, dtype="intp") new_cols = self._axes[1].take(taker) new_axes = [self._axes[0], new_cols] - return type(self)(arrays, new_axes, verify_integrity=False) + return type(self)(arrays, new_axes, verify_integrity=False), taker - def get_bool_data(self: T, copy: bool = False) -> T: + def get_bool_data(self: T, copy: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Select columns that are bool-dtype and object-dtype columns that are all-bool. @@ -486,7 +486,7 @@ def get_bool_data(self: T, copy: bool = False) -> T: """ return self._get_data_subset(is_inferred_bool_dtype) - def get_numeric_data(self: T, copy: bool = False) -> T: + def get_numeric_data(self: T, copy: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Select columns that have a numeric dtype. 
@@ -934,7 +934,7 @@ def idelete(self, indexer) -> ArrayManager: # -------------------------------------------------------------------- # Array-wise Operation - def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: + def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Apply grouped reduction function columnwise, returning a new ArrayManager. @@ -947,6 +947,7 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: Returns ------- ArrayManager + np.ndarray[intp] """ result_arrays: list[np.ndarray] = [] result_indices: list[int] = [] @@ -974,14 +975,16 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: else: index = Index(range(result_arrays[0].shape[0])) + taker = None if ignore_failures: - columns = self.items[np.array(result_indices, dtype="int64")] + taker = np.array(result_indices, dtype=np.intp) + columns = self.items[taker] else: columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" - return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + return type(self)(result_arrays, [index, columns]), taker # type: ignore[arg-type] def reduce( self: T, func: Callable, ignore_failures: bool = False @@ -1180,6 +1183,9 @@ def as_array( return result + def __len__(self) -> int: + return len(self.arrays) + class SingleArrayManager(BaseArrayManager, SingleDataManager): diff --git a/pandas/core/internals/base.py b/pandas/core/internals/base.py index ddc4495318568..f082f2c3778d4 100644 --- a/pandas/core/internals/base.py +++ b/pandas/core/internals/base.py @@ -45,7 +45,7 @@ def items(self) -> Index: @final def __len__(self) -> int: - return len(self.items) + raise AbstractMethodError(self) @property def ndim(self) -> int: @@ -124,8 +124,8 @@ def equals(self, other: object) -> bool: self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False - if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): - return False + #if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + # return False return self._equal_values(other) @@ -160,6 +160,9 @@ class SingleDataManager(DataManager): def ndim(self) -> Literal[1]: return 1 + def __len__(self) -> int: + return len(self.arrays[0]) + @final @property def array(self) -> ArrayLike: @@ -200,7 +203,7 @@ def grouped_reduce(self, func, ignore_failures: bool = False): index = default_index(len(res)) mgr = type(self).from_array(res, index) - return mgr + return mgr, np.arange(len(res), dtype=np.intp) # TODO: is taker meaningful here? @classmethod def from_array(cls, arr: ArrayLike, index: Index): diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 6aad8dbd940d4..708adc6201fe1 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -107,7 +107,7 @@ def arrays_to_mgr( verify_integrity: bool = True, typ: str | None = None, consolidate: bool = True, -) -> Manager: +) -> tuple[Manager, Index, Index]: """ Segregate Series based on type and coerce into matrices. 
@@ -152,13 +152,14 @@ def arrays_to_mgr( axes = [columns, index] if typ == "block": - return create_block_manager_from_column_arrays( + mgr = create_block_manager_from_column_arrays( arrays, axes, consolidate=consolidate ) elif typ == "array": - return ArrayManager(arrays, [index, columns]) + mgr = ArrayManager(arrays, [index, columns]) else: raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + return mgr, index, columns def rec_array_to_mgr( @@ -204,7 +205,7 @@ def rec_array_to_mgr( if columns is None: columns = arr_columns - mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) + mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)[0] if copy: mgr = mgr.copy() @@ -242,7 +243,7 @@ def mgr_to_mgr(mgr, typ: str, copy: bool = True): new_mgr = mgr else: if mgr.ndim == 2: - new_mgr = arrays_to_mgr( + new_mgr, _, _ = arrays_to_mgr( mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block" ) else: @@ -314,7 +315,7 @@ def ndarray_to_mgr( else: columns = ensure_index(columns) - return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) + return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)[0] elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype): # i.e. Datetime64TZ, PeriodDtype @@ -491,7 +492,7 @@ def dict_to_mgr( # dtype check to exclude e.g. range objects, scalars arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] - return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) + return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)[0] def nested_data_to_arrays( diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 61037a46f4f92..130f70fb9fb2a 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -353,7 +353,7 @@ def apply( result_blocks = extend_blocks(applied, result_blocks) if ignore_failures: - return self._combine(result_blocks) + return self._combine(result_blocks)[0] out = type(self).from_blocks(result_blocks, self.axes) return out @@ -524,11 +524,11 @@ def is_view(self) -> bool: return False - def _get_data_subset(self: T, predicate: Callable) -> T: + def _get_data_subset(self: T, predicate: Callable) -> tuple[T, npt.NDArray[np.intp]]: blocks = [blk for blk in self.blocks if predicate(blk.values)] return self._combine(blocks, copy=False) - def get_bool_data(self: T, copy: bool = False) -> T: + def get_bool_data(self: T, copy: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Select blocks that are bool-dtype and columns from object-dtype blocks that are all-bool. @@ -553,7 +553,7 @@ def get_bool_data(self: T, copy: bool = False) -> T: return self._combine(new_blocks, copy) - def get_numeric_data(self: T, copy: bool = False) -> T: + def get_numeric_data(self: T, copy: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Parameters ---------- @@ -563,24 +563,26 @@ def get_numeric_data(self: T, copy: bool = False) -> T: numeric_blocks = [blk for blk in self.blocks if blk.is_numeric] if len(numeric_blocks) == len(self.blocks): # Avoid somewhat expensive _combine + taker = np.arange(len(self), dtype=np.intp) # TODO: return None to indicate no take needed? 
if copy: - return self.copy(deep=True) - return self + return self.copy(deep=True), taker + return self, taker return self._combine(numeric_blocks, copy) def _combine( self: T, blocks: list[Block], copy: bool = True, index: Index | None = None - ) -> T: + ) -> tuple[T, npt.NDArray[np.intp]]: """return a new manager with the blocks""" if len(blocks) == 0: + indexer = np.arange(0, dtype=np.intp) if self.ndim == 2: # retain our own Index dtype if index is not None: axes = [self.items[:0], index] else: axes = [self.items[:0]] + self.axes[1:] - return self.make_empty(axes) - return self.make_empty() + return self.make_empty(axes), indexer + return self.make_empty(), indexer # FIXME: optimization potential indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) @@ -604,7 +606,7 @@ def _combine( axes[-1] = index axes[0] = self.items.take(indexer) - return type(self).from_blocks(new_blocks, axes, new_refs) + return type(self).from_blocks(new_blocks, axes, new_refs), indexer @property def nblocks(self) -> int: @@ -1473,7 +1475,7 @@ def idelete(self, indexer) -> BlockManager: # ---------------------------------------------------------------- # Block-wise Operation - def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: + def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> tuple[T, npt.NDArray[np.intp]]: """ Apply grouped reduction function blockwise, returning a new BlockManager. @@ -1486,6 +1488,7 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: Returns ------- BlockManager + np.ndarray[intp] """ result_blocks: list[Block] = [] dropped_any = False @@ -1522,7 +1525,8 @@ def grouped_reduce(self: T, func: Callable, ignore_failures: bool = False) -> T: # faster to skip _combine if we haven't dropped any blocks return self._combine(result_blocks, copy=False, index=index) - return type(self).from_blocks(result_blocks, [self.axes[0], index]) + taker = np.arange(len(self), dtype=np.intp) + return type(self).from_blocks(result_blocks, [self.axes[0], index]), taker def reduce( self: T, func: Callable, ignore_failures: bool = False @@ -1554,7 +1558,7 @@ def reduce( if ignore_failures: if res_blocks: indexer = np.concatenate([blk.mgr_locs.as_array for blk in res_blocks]) - new_mgr = self._combine(res_blocks, copy=False, index=index) + new_mgr = self._combine(res_blocks, copy=False, index=index)[0] else: indexer = [] new_mgr = type(self).from_blocks([], [self.items[:0], index]) @@ -1618,7 +1622,7 @@ def quantile( # ---------------------------------------------------------------- - def unstack(self, unstacker, fill_value) -> BlockManager: + def unstack(self, unstacker, fill_value) -> tuple[BlockManager, list[np.ndarray]]: """ Return a BlockManager with all blocks unstacked. 
@@ -1677,7 +1681,7 @@ def unstack(self, unstacker, fill_value) -> BlockManager: new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) - return bm + return bm, columns_mask def to_dict(self, copy: bool = True): """ @@ -1697,7 +1701,7 @@ def to_dict(self, copy: bool = True): bd.setdefault(str(b.dtype), []).append(b) # TODO(EA2D): the combine will be unnecessary with 2D EAs - return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()} + return {dtype: self._combine(blocks, copy=copy)[0] for dtype, blocks in bd.items()} def as_array( self, @@ -1856,6 +1860,10 @@ def _consolidate_inplace(self) -> None: self._known_consolidated = True self._rebuild_blknos_and_blklocs() + def __len__(self) -> int: + # TODO: cache? would need to invalidate akin to blklocs + return sum(x.shape[0] for x in self.blocks) + class SingleBlockManager(BaseBlockManager, SingleDataManager): """manage a single block with""" @@ -2049,8 +2057,10 @@ def array_values(self): def get_numeric_data(self, copy: bool = False): if self._block.is_numeric: - return self.copy(deep=copy) - return self.make_empty() + taker = np.arange(len(self.items), dtype=np.intp) + return self.copy(deep=copy), taker + taker = np.array([], dtype=np.intp) + return self.make_empty(), taker @property def _can_hold_na(self) -> bool: diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 85731bbde6d40..3411e674afa6f 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1937,7 +1937,7 @@ def _take_new_index( new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) # error: Incompatible return value type # (got "DataFrame", expected "NDFrameT") - return obj._constructor(new_mgr) # type: ignore[return-value] + return obj._constructor(new_mgr, index=new_index, columns=obj.columns) # type: ignore[return-value] else: raise ValueError("'obj' should be either a Series or a DataFrame") diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 3d9e4f0c69c62..ac60aaf591c27 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -621,7 +621,7 @@ def get_result(self): new_data._consolidate_inplace() cons = sample._constructor - return cons(new_data).__finalize__(self, method="concat") + return cons(new_data, index=self.new_axes[1], columns=self.new_axes[0]).__finalize__(self, method="concat") def _get_result_dim(self) -> int: if self._is_series and self.bm_axis == 1: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 524b26ff07769..3aae45e19bddf 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -746,7 +746,7 @@ def _reindex_and_concat( allow_dups=True, use_na_proxy=True, ) - left = left._constructor(lmgr) + left = left._constructor(lmgr, index=join_index, columns=left.columns) left.index = join_index if right_indexer is not None: @@ -759,7 +759,7 @@ def _reindex_and_concat( allow_dups=True, use_na_proxy=True, ) - right = right._constructor(rmgr) + right = right._constructor(rmgr, index=join_index, columns=right.columns) right.index = join_index from pandas import concat diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 0270a5dd75952..a788bd2f0dbd7 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -500,8 +500,10 @@ def _unstack_frame(obj: DataFrame, level, fill_value=None): unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor) if not 
obj._can_fast_transpose: - mgr = obj._mgr.unstack(unstacker, fill_value=fill_value) - return obj._constructor(mgr) + mgr, columns_mask = obj._mgr.unstack(unstacker, fill_value=fill_value) + new_columns = unstacker.get_new_columns(obj.columns) + new_columns = new_columns[columns_mask] + return obj._constructor(mgr, index=unstacker.new_index, columns=new_columns) else: return unstacker.get_result( obj._values, value_columns=obj.columns, fill_value=fill_value diff --git a/pandas/core/series.py b/pandas/core/series.py index fc97a8f04e0cc..8f5802d0b5497 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -318,7 +318,7 @@ class Series(base.IndexOpsMixin, NDFrame): _name: Hashable _metadata: list[str] = ["name"] - _internal_names_set = {"index"} | NDFrame._internal_names_set + _internal_names_set = {"_index", "index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs @@ -351,6 +351,15 @@ def __init__( fastpath: bool = False, ) -> None: + if isinstance(data, (SingleBlockManager, SingleArrayManager)): + if index is None: + assert False + if data.axes[0] is not index: + # Adding check to try to avoid segfualt in json tests + data.axes = [ensure_index(index)] + #if not index.equals(data.axes[0]):#index is not data.axes[0]: + # assert False + if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None @@ -592,7 +601,8 @@ def _set_axis(self, axis: int, labels: AnyArrayLike | list) -> None: pass # The ensure_index call above ensures we have an Index object - self._mgr.set_axis(axis, labels) + self._validate_set_axis(0, labels) + object.__setattr__(self, "_index", labels) # ndarray compatibility @property @@ -1071,7 +1081,8 @@ def _get_values_tuple(self, key: tuple): def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) - return self._constructor(new_mgr).__finalize__(self) + new_index = self.index[indexer] + return self._constructor(new_mgr, index=new_index).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ @@ -1946,7 +1957,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) - df = self._constructor_expanddim(mgr) + df = self._constructor_expanddim(mgr, index=self.index, columns=columns) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace=False) -> Series: @@ -2349,6 +2360,7 @@ def drop_duplicates( result = super().drop_duplicates(keep=keep) if inplace: self._update_inplace(result) + self._index = result.index return None else: return result diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index b30b27f5bae1a..ecf9aacae2c72 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -714,7 +714,7 @@ def test_consolidate_ordering_issues(self, mgr): # we have datetime/tz blocks in mgr cons = mgr.consolidate() assert cons.nblocks == 4 - cons = mgr.consolidate().get_numeric_data() + cons = mgr.consolidate().get_numeric_data()[0] assert cons.nblocks == 1 assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement) tm.assert_numpy_array_equal( @@ -752,7 +752,7 @@ def test_get_numeric_data(self, using_copy_on_write): ) mgr.iset(5, np.array([1, 2, 3], dtype=np.object_)) - numeric = mgr.get_numeric_data() + numeric = mgr.get_numeric_data()[0] tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"])) 
tm.assert_almost_equal( mgr.iget(mgr.items.get_loc("float")).internal_values(), @@ -776,7 +776,7 @@ def test_get_numeric_data(self, using_copy_on_write): np.array([100.0, 200.0, 300.0]), ) - numeric2 = mgr.get_numeric_data(copy=True) + numeric2 = mgr.get_numeric_data(copy=True)[0] tm.assert_index_equal(numeric.items, Index(["int", "float", "complex", "bool"])) numeric2.iset( numeric2.items.get_loc("float"), @@ -804,7 +804,7 @@ def test_get_bool_data(self, using_copy_on_write): mgr.iset(6, np.array([True, False, True], dtype=np.object_)) with tm.assert_produces_warning(FutureWarning, match=msg): - bools = mgr.get_bool_data() + bools = mgr.get_bool_data()[0] tm.assert_index_equal(bools.items, Index(["bool", "dt"])) tm.assert_almost_equal( mgr.iget(mgr.items.get_loc("bool")).internal_values(), @@ -825,7 +825,7 @@ def test_get_bool_data(self, using_copy_on_write): # Check sharing with tm.assert_produces_warning(FutureWarning, match=msg): - bools2 = mgr.get_bool_data(copy=True) + bools2 = mgr.get_bool_data(copy=True)[0] bools2.iset(0, np.array([False, True, False])) if using_copy_on_write: tm.assert_numpy_array_equal( diff --git a/pandas/tests/series/methods/test_reindex.py b/pandas/tests/series/methods/test_reindex.py index b64c7bec6ea39..7ed3273b2e6a0 100644 --- a/pandas/tests/series/methods/test_reindex.py +++ b/pandas/tests/series/methods/test_reindex.py @@ -22,6 +22,7 @@ def test_reindex(datetime_series, string_series): identity = string_series.reindex(string_series.index) + # TODO: is the comment below still accurate for supported numpies? # __array_interface__ is not defined for older numpies # and on some pythons try:
cc @jorisvandenbossche we've discussed the idea of refactoring the Manager classes to not have the axes. This is a (not-remotely-working) attempt at de-coupling the NDFrame axes from Manager axes. Some questions before I spend much more time on this: 1) Am I remembering correctly that you are in principle in favor of this refactor? 2) Thoughts on how to handle the pyarrow usages? 3) Thoughts on cleaner ways to implement this in manageably-sized chunks?
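To make the intent of the diff concrete: nearly every constructor call site above is rewritten from `self._constructor(new_mgr)` to `self._constructor(new_mgr, **axes_dict)`, with the axes taken from the NDFrame rather than from the Manager. A minimal sketch of that pattern, using pandas internals (`_construct_axes_dict`, `_constructor`, `__finalize__`) purely for illustration; the helper name below is hypothetical and not part of the PR:

```python
import pandas as pd

def rebuild_with_explicit_axes(obj: pd.DataFrame, new_mgr) -> pd.DataFrame:
    # Old pattern: obj._constructor(new_mgr) re-derived index/columns from
    # the Manager's own axes. New pattern: pass them in explicitly so the
    # Manager no longer needs to carry them.
    axes_dict = obj._construct_axes_dict()  # {"index": ..., "columns": ...}
    return obj._constructor(new_mgr, **axes_dict).__finalize__(obj)

df = pd.DataFrame({"a": [1, 2]})
same = rebuild_with_explicit_axes(df, df._mgr)
```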
https://api.github.com/repos/pandas-dev/pandas/pulls/48126
2022-08-17T16:57:38Z
2022-10-03T19:47:39Z
null
2022-10-03T19:47:44Z
BUG: #39448 fixed dropna to return copy correctly
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0ceac8aeb9db8..0d5b96439843c 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -993,7 +993,7 @@ Indexing - Bug in :meth:`DataFrame.sum` min_count changes dtype if input contains NaNs (:issue:`46947`) - Bug in :class:`IntervalTree` that lead to an infinite recursion. (:issue:`46658`) - Bug in :class:`PeriodIndex` raising ``AttributeError`` when indexing on ``NA``, rather than putting ``NaT`` in its place. (:issue:`46673`) -- +- Bug in :meth:`DataFrame.dropna` where a copy of the dataframe was not returned if rows are dropped (:issue:`39448`) Missing ^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7a4f41da5840c..c7aa3f7da6f3c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6546,7 +6546,7 @@ def dropna( if np.all(mask): result = self.copy() else: - result = self.loc(axis=axis)[mask] + result = self.loc(axis=axis)[mask].copy() if not inplace: return result
- [X] closes #39448 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [N/A] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
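A quick illustration of the contract this fix enforces (a sketch of the intended behavior, not one of the PR's tests): when rows are dropped, the result should own its data, so writing to it must not touch the original frame.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [4.0, 5.0, 6.0]})
result = df.dropna()  # drops the middle row

# With the added .copy(), `result` no longer shares state with `df`,
# so this assignment leaves the original frame untouched.
result.loc[0, "a"] = 99.0
assert df.loc[0, "a"] == 1.0
```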
https://api.github.com/repos/pandas-dev/pandas/pulls/48125
2022-08-17T15:45:12Z
2022-09-26T23:02:07Z
null
2022-09-26T23:02:07Z
1.4.x
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fcd822988de20..b22f93d488de2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -198,6 +198,7 @@ nargsort, ) +from pandas.errors import InvalidIndexError from pandas.io.common import get_handle from pandas.io.formats import ( console, @@ -3872,7 +3873,7 @@ def _set_value( # or ValueError series._mgr.setitem_inplace(loc, value) - except (KeyError, TypeError, ValueError): + except (KeyError, TypeError, ValueError, InvalidIndexError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value
Fixes a bug where setting a value with `.at` raised `InvalidIndexError`: `df.at[slice, item] = array_or_other` failed when `item` already existed.
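A hedged reproduction sketch of the failure mode described above (the key shape is inferred from the one-line description, so treat this as illustrative rather than the canonical test case):

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3]})

# A slice key makes Index.get_loc raise InvalidIndexError inside
# _set_value; with InvalidIndexError added to the except clause, the
# assignment now falls back to the slower .loc path instead of raising.
df.at[slice(0, 2), "x"] = [10, 20, 30]
```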
https://api.github.com/repos/pandas-dev/pandas/pulls/48120
2022-08-17T08:03:15Z
2022-08-19T15:50:37Z
null
2022-08-19T15:50:37Z
WEB: Update inactive maintainers
diff --git a/web/pandas/config.yml b/web/pandas/config.yml index 79a77e80830f3..b33ea5b0dd972 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -69,21 +69,16 @@ maintainers: - jorisvandenbossche - TomAugspurger - jreback - - sinhrks - - cpcloud - gfyoung - WillAyd - mroeschke - - jschendel - jbrockmendel - datapythonista - simonjayhawkins - topper-123 - alimcmaster1 - bashtage - - charlesdong1991 - Dr-Irv - - dsaxton - MarcoGorelli - rhshadrach - phofl @@ -98,7 +93,12 @@ maintainers: - jtratner - shoyer - chris-b1 + - sinhrks + - cpcloud - toobaz + - jschendel + - charlesdong1991 + - dsaxton coc: - Safia Abdalla - Tom Augspurger
Following the proposal on the pandas core dev list, and after sending direct messages to all the people moved to inactive here and receiving either an OK or no answer, I am updating the lists of active/inactive maintainers. If any of the people being updated here is still active in the project, just let me know; I'm happy to revert the change for anyone who doesn't wish to be moved to inactive status. Please leave this open for a few days before merging, so people have time to object. In any case, if there is any objection after it is merged, we can revert in a new PR. CC: @sinhrks @cpcloud @jschendel @topper-123 @charlesdong1991 @dsaxton
https://api.github.com/repos/pandas-dev/pandas/pulls/48119
2022-08-17T05:19:20Z
2022-08-19T17:36:23Z
2022-08-19T17:36:23Z
2022-08-19T17:36:23Z
DOC: GH48097 remove optional from data for DatetimeIndex docs
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 2625d8c683a0c..835d6a3948724 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -146,8 +146,8 @@ class DatetimeIndex(DatetimeTimedeltaMixin): Parameters ---------- - data : array-like (1-dimensional), optional - Optional datetime-like data to construct index with. + data : array-like (1-dimensional) + Datetime-like data to construct index with. freq : str or pandas offset object, optional One of pandas date offset strings or corresponding objects. The string 'infer' can be passed in order to set the frequency of the index as the
- [x] closes #48097 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature. Updated the docstring for `DatetimeIndex` so that `data` is no longer described as optional.
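For context, a minimal illustration of why `data` is documented as required: `DatetimeIndex` wraps existing datetime-like values, while `pd.date_range` is the usual way to generate a range of timestamps.

```python
import pandas as pd

idx = pd.DatetimeIndex(["2021-01-01", "2021-01-02"])  # data is required
rng = pd.date_range("2021-01-01", periods=2, freq="D")
assert idx.equals(rng)  # same timestamps, different construction paths
```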
https://api.github.com/repos/pandas-dev/pandas/pulls/48118
2022-08-17T04:33:59Z
2022-08-17T16:51:05Z
2022-08-17T16:51:05Z
2022-08-17T16:51:12Z
ENH: droplevel copy keyword
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bcbe2c6d8b104..8aa4878288507 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -297,6 +297,7 @@ Other enhancements - :meth:`DataFrame.quantile` gained a ``method`` argument that can accept ``table`` to evaluate multi-column quantiles (:issue:`43881`) - :class:`Interval` now supports checking whether one interval is contained by another interval (:issue:`46613`) - Added ``copy`` keyword to :meth:`Series.set_axis` and :meth:`DataFrame.set_axis` to allow user to set axis on a new object without necessarily copying the underlying data (:issue:`47932`) +- :meth:`DataFrame.droplevel` and :meth:`Series.droplevel` support a ``copy`` argument. if ``False``, the underlying data is not copied (:issue:`48117`) - :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`) - :meth:`DataFrame.set_index` now supports a ``copy`` keyword. If ``False``, the underlying data is not copied when a new :class:`DataFrame` is returned (:issue:`48043`) - The method :meth:`.ExtensionArray.factorize` accepts ``use_na_sentinel=False`` for determining how null values are to be treated (:issue:`46601`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 88184285d3683..e3ee68ee0fa87 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -855,7 +855,9 @@ def swapaxes( @final @doc(klass=_shared_doc_kwargs["klass"]) - def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: + def droplevel( + self: NDFrameT, level: IndexLabel, axis: Axis = 0, copy: bool_t = True + ) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. @@ -874,6 +876,11 @@ def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: For `Series` this parameter is unused and defaults to 0. + copy : bool, default True + Whether to make a copy of the underlying data. + + .. 
versionadded:: 1.5.0 + Returns ------- {klass} @@ -916,7 +923,7 @@ def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) - return self.set_axis(new_labels, axis=axis) + return self.set_axis(new_labels, axis=axis, copy=copy) def pop(self, item: Hashable) -> Series | Any: result = self[item] diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 867835ef7f0a3..4bc3559c42401 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -241,7 +241,7 @@ def __internal_pivot_table( # discard the top level if values_passed and not values_multi and table.columns.nlevels > 1: - table = table.droplevel(0, axis=1) + table = table.droplevel(0, axis=1, copy=False) if len(index) == 0 and len(columns) > 0: table = table.T diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 0270a5dd75952..047774f22a9f5 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -535,10 +535,7 @@ def _unstack_extension_series(series: Series, level, fill_value) -> DataFrame: df = series.to_frame() result = df.unstack(level=level, fill_value=fill_value) - # equiv: result.droplevel(level=0, axis=1) - # but this avoids an extra copy - result.columns = result.columns.droplevel(0) - return result + return result.droplevel(level=0, axis=1, copy=False) def stack(frame: DataFrame, level=-1, dropna: bool = True): diff --git a/pandas/tests/frame/methods/test_droplevel.py b/pandas/tests/frame/methods/test_droplevel.py index e1302d4b73f2b..3b967b63a9b91 100644 --- a/pandas/tests/frame/methods/test_droplevel.py +++ b/pandas/tests/frame/methods/test_droplevel.py @@ -34,3 +34,28 @@ def test_droplevel(self, frame_or_series): # test that droplevel raises ValueError on axis != 0 with pytest.raises(ValueError, match="No axis named columns"): df.droplevel(1, axis="columns") + + def test_droplevel_copy(self, frame_or_series): + cols = MultiIndex.from_tuples( + [("c", "e"), ("d", "f")], names=["level_1", "level_2"] + ) + mi = MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]) + df = DataFrame([[3, 4], [7, 8], [11, 12]], index=mi, columns=cols) + if frame_or_series is not DataFrame: + df = df.iloc[:, 0] + + # Check that we DID make a copy + res = df.droplevel("a", axis="index", copy=True) + if frame_or_series is DataFrame: + for i in range(df.shape[1]): + assert not tm.shares_memory(df.iloc[:, i], res.iloc[:, i]) + else: + assert not tm.shares_memory(res, df) + + # Check that we did NOT make a copy + res = df.droplevel("a", axis="index", copy=False) + if frame_or_series is DataFrame: + for i in range(df.shape[1]): + assert tm.shares_memory(df.iloc[:, i], res.iloc[:, i]) + else: + assert tm.shares_memory(res, df)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
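A usage sketch of the new keyword, mirroring what the added `test_droplevel_copy` asserts via `tm.shares_memory` (assuming numpy-backed columns):

```python
import pandas as pd
import pandas._testing as tm

mi = pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"])
df = pd.DataFrame({"x": [3.0, 7.0, 11.0]}, index=mi)

# copy=False drops the "a" level without copying the underlying data,
# so the result still shares memory with the original frame.
res = df.droplevel("a", copy=False)
assert tm.shares_memory(df["x"], res["x"])
```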
https://api.github.com/repos/pandas-dev/pandas/pulls/48117
2022-08-17T01:56:59Z
2022-09-14T21:27:37Z
null
2022-09-14T21:27:42Z
Revert Interval/IntervalIndex/interval_range.inclusive deprecation
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 89c627865049e..05e12630d7540 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -268,9 +268,7 @@ def setup(self, bins): self.datetime_series = pd.Series( np.random.randint(N, size=N), dtype="datetime64[ns]" ) - self.interval_bins = pd.IntervalIndex.from_breaks( - np.linspace(0, N, bins), "right" - ) + self.interval_bins = pd.IntervalIndex.from_breaks(np.linspace(0, N, bins)) def time_cut_int(self, bins): pd.cut(self.int_series, bins) diff --git a/doc/redirects.csv b/doc/redirects.csv index 90ddf6c4dc582..9b8a5a73dedff 100644 --- a/doc/redirects.csv +++ b/doc/redirects.csv @@ -741,11 +741,11 @@ generated/pandas.Index.values,../reference/api/pandas.Index.values generated/pandas.Index.view,../reference/api/pandas.Index.view generated/pandas.Index.where,../reference/api/pandas.Index.where generated/pandas.infer_freq,../reference/api/pandas.infer_freq -generated/pandas.Interval.inclusive,../reference/api/pandas.Interval.inclusive +generated/pandas.Interval.closed,../reference/api/pandas.Interval.closed generated/pandas.Interval.closed_left,../reference/api/pandas.Interval.closed_left generated/pandas.Interval.closed_right,../reference/api/pandas.Interval.closed_right generated/pandas.Interval,../reference/api/pandas.Interval -generated/pandas.IntervalIndex.inclusive,../reference/api/pandas.IntervalIndex.inclusive +generated/pandas.IntervalIndex.closed,../reference/api/pandas.IntervalIndex.closed generated/pandas.IntervalIndex.contains,../reference/api/pandas.IntervalIndex.contains generated/pandas.IntervalIndex.from_arrays,../reference/api/pandas.IntervalIndex.from_arrays generated/pandas.IntervalIndex.from_breaks,../reference/api/pandas.IntervalIndex.from_breaks @@ -761,7 +761,6 @@ generated/pandas.IntervalIndex.mid,../reference/api/pandas.IntervalIndex.mid generated/pandas.IntervalIndex.overlaps,../reference/api/pandas.IntervalIndex.overlaps generated/pandas.IntervalIndex.right,../reference/api/pandas.IntervalIndex.right generated/pandas.IntervalIndex.set_closed,../reference/api/pandas.IntervalIndex.set_closed -generated/pandas.IntervalIndex.set_inclusive,../reference/api/pandas.IntervalIndex.set_inclusive generated/pandas.IntervalIndex.to_tuples,../reference/api/pandas.IntervalIndex.to_tuples generated/pandas.IntervalIndex.values,../reference/api/pandas.IntervalIndex.values generated/pandas.Interval.left,../reference/api/pandas.Interval.left diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index cd0ce581519a8..1b8e0fdb856b5 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -303,7 +303,6 @@ Properties .. autosummary:: :toctree: api/ - Interval.inclusive Interval.closed Interval.closed_left Interval.closed_right @@ -341,7 +340,7 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`. arrays.IntervalArray.left arrays.IntervalArray.right - arrays.IntervalArray.inclusive + arrays.IntervalArray.closed arrays.IntervalArray.mid arrays.IntervalArray.length arrays.IntervalArray.is_empty @@ -352,7 +351,6 @@ A collection of intervals may be stored in an :class:`arrays.IntervalArray`. 
arrays.IntervalArray.contains arrays.IntervalArray.overlaps arrays.IntervalArray.set_closed - arrays.IntervalArray.set_inclusive arrays.IntervalArray.to_tuples diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index 589a339a1ca60..ddfef14036ef3 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -242,7 +242,7 @@ IntervalIndex components IntervalIndex.left IntervalIndex.right IntervalIndex.mid - IntervalIndex.inclusive + IntervalIndex.closed IntervalIndex.length IntervalIndex.values IntervalIndex.is_empty @@ -251,7 +251,6 @@ IntervalIndex components IntervalIndex.get_loc IntervalIndex.get_indexer IntervalIndex.set_closed - IntervalIndex.set_inclusive IntervalIndex.contains IntervalIndex.overlaps IntervalIndex.to_tuples diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index aaff76261b3ad..b8df21ab5a5b4 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -1020,7 +1020,7 @@ Trying to select an ``Interval`` that is not exactly contained in the ``Interval In [7]: df.loc[pd.Interval(0.5, 2.5)] --------------------------------------------------------------------------- - KeyError: Interval(0.5, 2.5, inclusive='right') + KeyError: Interval(0.5, 2.5, closed='right') Selecting all ``Intervals`` that overlap a given ``Interval`` can be performed using the :meth:`~IntervalIndex.overlaps` method to create a boolean indexer. @@ -1082,14 +1082,14 @@ of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like inter pd.interval_range(start=pd.Timedelta("0 days"), periods=3, freq="9H") -Additionally, the ``inclusive`` parameter can be used to specify which side(s) the intervals -are closed on. Intervals are closed on the both side by default. +Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals +are closed on. Intervals are closed on the right side by default. .. ipython:: python - pd.interval_range(start=0, end=4, inclusive="both") + pd.interval_range(start=0, end=4, closed="both") - pd.interval_range(start=0, end=4, inclusive="neither") + pd.interval_range(start=0, end=4, closed="neither") Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index a23c977e94b65..faf4b1ac44d5b 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -448,7 +448,7 @@ Selecting via a specific interval: .. ipython:: python - df.loc[pd.Interval(1.5, 3.0, "right")] + df.loc[pd.Interval(1.5, 3.0)] Selecting via a scalar value that is contained *in* the intervals. diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4f04d5a0ee69d..e4dd6fa091d80 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -584,18 +584,18 @@ this would previously return ``True`` for any ``Interval`` overlapping an ``Inte .. code-block:: python - In [4]: pd.Interval(1, 2, inclusive='neither') in ii + In [4]: pd.Interval(1, 2, closed='neither') in ii Out[4]: True - In [5]: pd.Interval(-10, 10, inclusive='both') in ii + In [5]: pd.Interval(-10, 10, closed='both') in ii Out[5]: True *New behavior*: .. 
ipython:: python - pd.Interval(1, 2, inclusive='neither') in ii - pd.Interval(-10, 10, inclusive='both') in ii + pd.Interval(1, 2, closed='neither') in ii + pd.Interval(-10, 10, closed='both') in ii The :meth:`~IntervalIndex.get_loc` method now only returns locations for exact matches to ``Interval`` queries, as opposed to the previous behavior of returning locations for overlapping matches. A ``KeyError`` will be raised if an exact match is not found. @@ -619,7 +619,7 @@ returning locations for overlapping matches. A ``KeyError`` will be raised if a In [7]: ii.get_loc(pd.Interval(2, 6)) --------------------------------------------------------------------------- - KeyError: Interval(2, 6, inclusive='right') + KeyError: Interval(2, 6, closed='right') Likewise, :meth:`~IntervalIndex.get_indexer` and :meth:`~IntervalIndex.get_indexer_non_unique` will also only return locations for exact matches to ``Interval`` queries, with ``-1`` denoting that an exact match was not found. @@ -680,11 +680,11 @@ Similarly, a ``KeyError`` will be raised for non-exact matches instead of return In [6]: s[pd.Interval(2, 3)] --------------------------------------------------------------------------- - KeyError: Interval(2, 3, inclusive='right') + KeyError: Interval(2, 3, closed='right') In [7]: s.loc[pd.Interval(2, 3)] --------------------------------------------------------------------------- - KeyError: Interval(2, 3, inclusive='right') + KeyError: Interval(2, 3, closed='right') The :meth:`~IntervalIndex.overlaps` method can be used to create a boolean indexer that replicates the previous behavior of returning overlapping matches. diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 48c808819d788..a7ed072fb0aac 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -823,17 +823,10 @@ Other Deprecations - Deprecated :attr:`Timedelta.freq` and :attr:`Timedelta.is_populated` (:issue:`46430`) - Deprecated :attr:`Timedelta.delta` (:issue:`46476`) - Deprecated passing arguments as positional in :meth:`DataFrame.any` and :meth:`Series.any` (:issue:`44802`) -- Deprecated the ``closed`` argument in :meth:`interval_range` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) - Deprecated the methods :meth:`DataFrame.mad`, :meth:`Series.mad`, and the corresponding groupby methods (:issue:`11787`) - Deprecated positional arguments to :meth:`Index.join` except for ``other``, use keyword-only arguments instead of positional arguments (:issue:`46518`) - Deprecated positional arguments to :meth:`StringMethods.rsplit` and :meth:`StringMethods.split` except for ``pat``, use keyword-only arguments instead of positional arguments (:issue:`47423`) - Deprecated indexing on a timezone-naive :class:`DatetimeIndex` using a string representing a timezone-aware datetime (:issue:`46903`, :issue:`36148`) -- Deprecated the ``closed`` argument in :class:`Interval` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) -- Deprecated the ``closed`` argument in :class:`IntervalIndex` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) -- Deprecated the ``closed`` argument in :class:`IntervalDtype` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) -- Deprecated the ``closed`` argument in :class:`.IntervalArray` in favor of ``inclusive`` argument; In a future version passing ``closed`` will 
raise (:issue:`40245`) -- Deprecated :meth:`.IntervalArray.set_closed` and :meth:`.IntervalIndex.set_closed` in favor of ``set_inclusive``; In a future version ``set_closed`` will get removed (:issue:`40245`) -- Deprecated the ``closed`` argument in :class:`ArrowInterval` in favor of ``inclusive`` argument; In a future version passing ``closed`` will raise (:issue:`40245`) - Deprecated allowing ``unit="M"`` or ``unit="Y"`` in :class:`Timestamp` constructor with a non-round float value (:issue:`47267`) - Deprecated the ``display.column_space`` global configuration option (:issue:`7576`) - Deprecated the argument ``na_sentinel`` in :func:`factorize`, :meth:`Index.factorize`, and :meth:`.ExtensionArray.factorize`; pass ``use_na_sentinel=True`` instead to use the sentinel ``-1`` for NaN values and ``use_na_sentinel=False`` instead of ``na_sentinel=None`` to encode NaN values (:issue:`46910`) diff --git a/pandas/_libs/interval.pyi b/pandas/_libs/interval.pyi index 9b73e9d0bf54a..4c36246e04d23 100644 --- a/pandas/_libs/interval.pyi +++ b/pandas/_libs/interval.pyi @@ -8,14 +8,13 @@ from typing import ( import numpy as np import numpy.typing as npt -from pandas._libs import lib from pandas._typing import ( - IntervalInclusiveType, + IntervalClosedType, Timedelta, Timestamp, ) -VALID_INCLUSIVE: frozenset[str] +VALID_CLOSED: frozenset[str] _OrderableScalarT = TypeVar("_OrderableScalarT", int, float) _OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta) @@ -50,13 +49,7 @@ class IntervalMixin: def open_right(self) -> bool: ... @property def is_empty(self) -> bool: ... - def _check_inclusive_matches( - self, other: IntervalMixin, name: str = ... - ) -> None: ... - -def _warning_interval( - inclusive, closed -) -> tuple[IntervalInclusiveType, lib.NoDefault]: ... + def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ... class Interval(IntervalMixin, Generic[_OrderableT]): @property @@ -64,17 +57,14 @@ class Interval(IntervalMixin, Generic[_OrderableT]): @property def right(self: Interval[_OrderableT]) -> _OrderableT: ... @property - def inclusive(self) -> IntervalInclusiveType: ... - @property - def closed(self) -> IntervalInclusiveType: ... + def closed(self) -> IntervalClosedType: ... mid: _MidDescriptor length: _LengthDescriptor def __init__( self, left: _OrderableT, right: _OrderableT, - inclusive: IntervalInclusiveType = ..., - closed: IntervalInclusiveType = ..., + closed: IntervalClosedType = ..., ) -> None: ... def __hash__(self) -> int: ... @overload @@ -157,15 +147,15 @@ class Interval(IntervalMixin, Generic[_OrderableT]): def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ... def intervals_to_interval_bounds( - intervals: np.ndarray, validate_inclusive: bool = ... -) -> tuple[np.ndarray, np.ndarray, IntervalInclusiveType]: ... + intervals: np.ndarray, validate_closed: bool = ... +) -> tuple[np.ndarray, np.ndarray, str]: ... class IntervalTree(IntervalMixin): def __init__( self, left: np.ndarray, right: np.ndarray, - inclusive: IntervalInclusiveType = ..., + closed: IntervalClosedType = ..., leaf_size: int = ..., ) -> None: ... 
@property diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 2982110ea35cc..67c92a0f5df23 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -10,8 +10,6 @@ from cpython.datetime cimport ( import_datetime, ) -from pandas.util._exceptions import find_stack_level - import_datetime() cimport cython @@ -43,9 +41,6 @@ from numpy cimport ( cnp.import_array() -import warnings - -from pandas._libs import lib from pandas._libs cimport util from pandas._libs.hashtable cimport Int64Vector @@ -58,7 +53,7 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -VALID_INCLUSIVE = frozenset(['both', 'neither', 'left', 'right']) +VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) cdef class IntervalMixin: @@ -75,7 +70,7 @@ cdef class IntervalMixin: bool True if the Interval is closed on the left-side. """ - return self.inclusive in ('left', 'both') + return self.closed in ('left', 'both') @property def closed_right(self): @@ -87,9 +82,9 @@ cdef class IntervalMixin: Returns ------- bool - True if the Interval is closed on the right-side. + True if the Interval is closed on the right-side. """ - return self.inclusive in ('right', 'both') + return self.closed in ('right', 'both') @property def open_left(self): @@ -115,7 +110,7 @@ cdef class IntervalMixin: Returns ------- bool - True if the Interval is not closed on the right-side. + True if the Interval is not closed on the right-side. """ return not self.closed_right @@ -156,43 +151,43 @@ cdef class IntervalMixin: -------- An :class:`Interval` that contains points is not empty: - >>> pd.Interval(0, 1, inclusive='right').is_empty + >>> pd.Interval(0, 1, closed='right').is_empty False An ``Interval`` that does not contain any points is empty: - >>> pd.Interval(0, 0, inclusive='right').is_empty + >>> pd.Interval(0, 0, closed='right').is_empty True - >>> pd.Interval(0, 0, inclusive='left').is_empty + >>> pd.Interval(0, 0, closed='left').is_empty True - >>> pd.Interval(0, 0, inclusive='neither').is_empty + >>> pd.Interval(0, 0, closed='neither').is_empty True An ``Interval`` that contains a single point is not empty: - >>> pd.Interval(0, 0, inclusive='both').is_empty + >>> pd.Interval(0, 0, closed='both').is_empty False An :class:`~arrays.IntervalArray` or :class:`IntervalIndex` returns a boolean ``ndarray`` positionally indicating if an ``Interval`` is empty: - >>> ivs = [pd.Interval(0, 0, inclusive='neither'), - ... pd.Interval(1, 2, inclusive='neither')] + >>> ivs = [pd.Interval(0, 0, closed='neither'), + ... pd.Interval(1, 2, closed='neither')] >>> pd.arrays.IntervalArray(ivs).is_empty array([ True, False]) Missing values are not considered empty: - >>> ivs = [pd.Interval(0, 0, inclusive='neither'), np.nan] + >>> ivs = [pd.Interval(0, 0, closed='neither'), np.nan] >>> pd.IntervalIndex(ivs).is_empty array([ True, False]) """ - return (self.right == self.left) & (self.inclusive != 'both') + return (self.right == self.left) & (self.closed != 'both') - def _check_inclusive_matches(self, other, name='other'): + def _check_closed_matches(self, other, name='other'): """ - Check if the inclusive attribute of `other` matches. + Check if the closed attribute of `other` matches. Note that 'left' and 'right' are considered different from 'both'. @@ -205,44 +200,18 @@ cdef class IntervalMixin: Raises ------ ValueError - When `other` is not inclusive exactly the same as self. + When `other` is not closed exactly the same as self.
""" - if self.inclusive != other.inclusive: - raise ValueError(f"'{name}.inclusive' is {repr(other.inclusive)}, " - f"expected {repr(self.inclusive)}.") + if self.closed != other.closed: + raise ValueError(f"'{name}.closed' is {repr(other.closed)}, " + f"expected {repr(self.closed)}.") cdef bint _interval_like(other): return (hasattr(other, 'left') and hasattr(other, 'right') - and hasattr(other, 'inclusive')) + and hasattr(other, 'closed')) -def _warning_interval(inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default): - """ - warning in interval class for variable inclusive and closed - """ - if inclusive is not None and closed != lib.no_default: - raise ValueError( - "Deprecated argument `closed` cannot be passed " - "if argument `inclusive` is not None" - ) - elif closed != lib.no_default: - warnings.warn( - "Argument `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - if closed is None: - inclusive = "right" - elif closed in ("both", "neither", "left", "right"): - inclusive = closed - else: - raise ValueError( - "Argument `closed` has to be either" - "'both', 'neither', 'left' or 'right'" - ) - - return inclusive, closed cdef class Interval(IntervalMixin): """ @@ -258,17 +227,9 @@ cdef class Interval(IntervalMixin): Whether the interval is closed on the left-side, right-side, both or neither. See the Notes for more detailed explanation. - .. deprecated:: 1.5.0 - - inclusive : {'both', 'neither', 'left', 'right'}, default 'both' - Whether the interval is inclusive on the left-side, right-side, both or - neither. See the Notes for more detailed explanation. - - .. versionadded:: 1.5.0 - See Also -------- - IntervalIndex : An Index of Interval objects that are all inclusive on the + IntervalIndex : An Index of Interval objects that are all closed on the same side. cut : Convert continuous data into discrete bins (Categorical of Interval objects). @@ -281,32 +242,32 @@ cdef class Interval(IntervalMixin): The parameters `left` and `right` must be from the same type, you must be able to compare them and they must satisfy ``left <= right``. - A inclusive interval (in mathematics denoted by square brackets) contains - its endpoints, i.e. the inclusive interval ``[0, 5]`` is characterized by the - conditions ``0 <= x <= 5``. This is what ``inclusive='both'`` stands for. + A closed interval (in mathematics denoted by square brackets) contains + its endpoints, i.e. the closed interval ``[0, 5]`` is characterized by the + conditions ``0 <= x <= 5``. This is what ``closed='both'`` stands for. An open interval (in mathematics denoted by parentheses) does not contain its endpoints, i.e. the open interval ``(0, 5)`` is characterized by the - conditions ``0 < x < 5``. This is what ``inclusive='neither'`` stands for. - Intervals can also be half-open or half-inclusive, i.e. ``[0, 5)`` is - described by ``0 <= x < 5`` (``inclusive='left'``) and ``(0, 5]`` is - described by ``0 < x <= 5`` (``inclusive='right'``). + conditions ``0 < x < 5``. This is what ``closed='neither'`` stands for. + Intervals can also be half-open or half-closed, i.e. ``[0, 5)`` is + described by ``0 <= x < 5`` (``closed='left'``) and ``(0, 5]`` is + described by ``0 < x <= 5`` (``closed='right'``). 
Examples -------- It is possible to build Intervals of different types, like numeric ones: - >>> iv = pd.Interval(left=0, right=5, inclusive='right') + >>> iv = pd.Interval(left=0, right=5) >>> iv - Interval(0, 5, inclusive='right') + Interval(0, 5, closed='right') You can check if an element belongs to it, or if it contains another interval: >>> 2.5 in iv True - >>> pd.Interval(left=2, right=5, inclusive='both') in iv + >>> pd.Interval(left=2, right=5, closed='both') in iv True - You can test the bounds (``inclusive='right'``, so ``0 < x <= 5``): + You can test the bounds (``closed='right'``, so ``0 < x <= 5``): >>> 0 in iv False @@ -326,16 +287,16 @@ cdef class Interval(IntervalMixin): >>> shifted_iv = iv + 3 >>> shifted_iv - Interval(3, 8, inclusive='right') + Interval(3, 8, closed='right') >>> extended_iv = iv * 10.0 >>> extended_iv - Interval(0.0, 50.0, inclusive='right') + Interval(0.0, 50.0, closed='right') To create a time interval you can use Timestamps as the bounds >>> year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'), ... pd.Timestamp('2018-01-01 00:00:00'), - ... inclusive='left') + ... closed='left') >>> pd.Timestamp('2017-01-01 00:00') in year_2017 True >>> year_2017.length @@ -354,27 +315,22 @@ cdef class Interval(IntervalMixin): Right bound for the interval. """ - cdef readonly str inclusive + cdef readonly str closed """ String describing the inclusive side the intervals. Either ``left``, ``right``, ``both`` or ``neither``. """ - def __init__(self, left, right, inclusive: str | None = None, closed: None | lib.NoDefault = lib.no_default): + def __init__(self, left, right, str closed='right'): # note: it is faster to just do these checks than to use a special # constructor (__cinit__/__new__) to avoid them self._validate_endpoint(left) self._validate_endpoint(right) - inclusive, closed = _warning_interval(inclusive, closed) - - if inclusive is None: - inclusive = "right" - - if inclusive not in VALID_INCLUSIVE: - raise ValueError(f"invalid option for 'inclusive': {inclusive}") + if closed not in VALID_CLOSED: + raise ValueError(f"invalid option for 'closed': {closed}") if not left <= right: raise ValueError("left side of interval must be <= right side") if (isinstance(left, _Timestamp) and @@ -384,23 +340,7 @@ cdef class Interval(IntervalMixin): f"{repr(left.tzinfo)}' and {repr(right.tzinfo)}") self.left = left self.right = right - self.inclusive = inclusive - - @property - def closed(self): - """ - String describing the inclusive side the intervals. - - .. deprecated:: 1.5.0 - - Either ``left``, ``right``, ``both`` or ``neither``. 
- """ - warnings.warn( - "Attribute `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - return self.inclusive + self.closed = closed def _validate_endpoint(self, endpoint): # GH 23013 @@ -410,12 +350,12 @@ cdef class Interval(IntervalMixin): "are allowed when constructing an Interval.") def __hash__(self): - return hash((self.left, self.right, self.inclusive)) + return hash((self.left, self.right, self.closed)) def __contains__(self, key) -> bool: if _interval_like(key): - key_closed_left = key.inclusive in ('left', 'both') - key_closed_right = key.inclusive in ('right', 'both') + key_closed_left = key.closed in ('left', 'both') + key_closed_right = key.closed in ('right', 'both') if self.open_left and key_closed_left: left_contained = self.left < key.left else: @@ -430,8 +370,8 @@ cdef class Interval(IntervalMixin): def __richcmp__(self, other, op: int): if isinstance(other, Interval): - self_tuple = (self.left, self.right, self.inclusive) - other_tuple = (other.left, other.right, other.inclusive) + self_tuple = (self.left, self.right, self.closed) + other_tuple = (other.left, other.right, other.closed) return PyObject_RichCompare(self_tuple, other_tuple, op) elif util.is_array(other): return np.array( @@ -442,7 +382,7 @@ cdef class Interval(IntervalMixin): return NotImplemented def __reduce__(self): - args = (self.left, self.right, self.inclusive) + args = (self.left, self.right, self.closed) return (type(self), args) def _repr_base(self): @@ -460,7 +400,7 @@ cdef class Interval(IntervalMixin): left, right = self._repr_base() name = type(self).__name__ - repr_str = f'{name}({repr(left)}, {repr(right)}, inclusive={repr(self.inclusive)})' + repr_str = f'{name}({repr(left)}, {repr(right)}, closed={repr(self.closed)})' return repr_str def __str__(self) -> str: @@ -476,7 +416,7 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(y) or is_timedelta64_object(y) ): - return Interval(self.left + y, self.right + y, inclusive=self.inclusive) + return Interval(self.left + y, self.right + y, closed=self.closed) elif ( # __radd__ pattern # TODO(cython3): remove this @@ -487,7 +427,7 @@ cdef class Interval(IntervalMixin): or is_timedelta64_object(self) ) ): - return Interval(y.left + self, y.right + self, inclusive=y.inclusive) + return Interval(y.left + self, y.right + self, closed=y.closed) return NotImplemented def __radd__(self, other): @@ -496,7 +436,7 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(other) or is_timedelta64_object(other) ): - return Interval(self.left + other, self.right + other, inclusive=self.inclusive) + return Interval(self.left + other, self.right + other, closed=self.closed) return NotImplemented def __sub__(self, y): @@ -505,40 +445,39 @@ cdef class Interval(IntervalMixin): or PyDelta_Check(y) or is_timedelta64_object(y) ): - return Interval(self.left - y, self.right - y, inclusive=self.inclusive) + return Interval(self.left - y, self.right - y, closed=self.closed) return NotImplemented def __mul__(self, y): if isinstance(y, numbers.Number): - return Interval(self.left * y, self.right * y, inclusive=self.inclusive) + return Interval(self.left * y, self.right * y, closed=self.closed) elif isinstance(y, Interval) and isinstance(self, numbers.Number): # __radd__ semantics # TODO(cython3): remove this - return Interval(y.left * self, y.right * self, inclusive=y.inclusive) - + return Interval(y.left * self, y.right * self, closed=y.closed) return NotImplemented def __rmul__(self, other): if 
isinstance(other, numbers.Number): - return Interval(self.left * other, self.right * other, inclusive=self.inclusive) + return Interval(self.left * other, self.right * other, closed=self.closed) return NotImplemented def __truediv__(self, y): if isinstance(y, numbers.Number): - return Interval(self.left / y, self.right / y, inclusive=self.inclusive) + return Interval(self.left / y, self.right / y, closed=self.closed) return NotImplemented def __floordiv__(self, y): if isinstance(y, numbers.Number): return Interval( - self.left // y, self.right // y, inclusive=self.inclusive) + self.left // y, self.right // y, closed=self.closed) return NotImplemented def overlaps(self, other): """ Check whether two Interval objects overlap. - Two intervals overlap if they share a common point, including inclusive + Two intervals overlap if they share a common point, including closed endpoints. Intervals that only have an open endpoint in common do not overlap. @@ -567,16 +506,16 @@ cdef class Interval(IntervalMixin): >>> i1.overlaps(i3) False - Intervals that share inclusive endpoints overlap: + Intervals that share closed endpoints overlap: - >>> i4 = pd.Interval(0, 1, inclusive='both') - >>> i5 = pd.Interval(1, 2, inclusive='both') + >>> i4 = pd.Interval(0, 1, closed='both') + >>> i5 = pd.Interval(1, 2, closed='both') >>> i4.overlaps(i5) True Intervals that only have an open endpoint in common do not overlap: - >>> i6 = pd.Interval(1, 2, inclusive='neither') + >>> i6 = pd.Interval(1, 2, closed='neither') >>> i4.overlaps(i6) False """ @@ -584,7 +523,7 @@ cdef class Interval(IntervalMixin): raise TypeError("`other` must be an Interval, " f"got {type(other).__name__}") - # equality is okay if both endpoints are inclusive (overlap at a point) + # equality is okay if both endpoints are closed (overlap at a point) op1 = le if (self.closed_left and other.closed_right) else lt op2 = le if (other.closed_left and self.closed_right) else lt @@ -596,29 +535,29 @@ cdef class Interval(IntervalMixin): @cython.wraparound(False) @cython.boundscheck(False) -def intervals_to_interval_bounds(ndarray intervals, bint validate_inclusive=True): +def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): """ Parameters ---------- intervals : ndarray Object array of Intervals / nulls. - validate_inclusive: bool, default True - Boolean indicating if all intervals must be inclusive on the same side. - Mismatching inclusive will raise if True, else return None for inclusive. + validate_closed: bool, default True + Boolean indicating if all intervals must be closed on the same side. + Mismatching closed will raise if True, else return None for closed. 
Returns ------- tuple of left : ndarray right : ndarray - inclusive: str + closed: str """ cdef: - object inclusive = None, interval + object closed = None, interval Py_ssize_t i, n = len(intervals) ndarray left, right - bint seen_inclusive = False + bint seen_closed = False left = np.empty(n, dtype=intervals.dtype) right = np.empty(n, dtype=intervals.dtype) @@ -636,15 +575,15 @@ def intervals_to_interval_bounds(ndarray intervals, bint validate_inclusive=True left[i] = interval.left right[i] = interval.right - if not seen_inclusive: - seen_inclusive = True - inclusive = interval.inclusive - elif inclusive != interval.inclusive: - inclusive = None - if validate_inclusive: - raise ValueError("intervals must all be inclusive on the same side") - - return left, right, inclusive + if not seen_closed: + seen_closed = True + closed = interval.closed + elif closed != interval.closed: + closed = None + if validate_closed: + raise ValueError("intervals must all be closed on the same side") + + return left, right, closed include "intervaltree.pxi" diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index 8bf1a53d56dfb..e7a310513d2fa 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -3,9 +3,7 @@ Template for intervaltree WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in """ -import warnings -from pandas._libs import lib from pandas._libs.algos import is_monotonic ctypedef fused int_scalar_t: @@ -36,34 +34,27 @@ cdef class IntervalTree(IntervalMixin): ndarray left, right IntervalNode root object dtype - str inclusive + str closed object _is_overlapping, _left_sorter, _right_sorter Py_ssize_t _na_count - def __init__(self, left, right, inclusive: str | None = None, leaf_size=100): + def __init__(self, left, right, closed='right', leaf_size=100): """ Parameters ---------- left, right : np.ndarray[ndim=1] Left and right bounds for each interval. Assumed to contain no NaNs. - - inclusive : {"both", "neither", "left", "right"}, optional + closed : {'left', 'right', 'both', 'neither'}, optional Whether the intervals are closed on the left-side, right-side, both or neither. Defaults to 'right'. - - .. versionadded:: 1.5.0 - leaf_size : int, optional Parameter that controls when the tree switches from creating nodes to brute-force search. Tune this parameter to optimize query performance. 
""" - if inclusive is None: - inclusive = "right" - - if inclusive not in ['left', 'right', 'both', 'neither']: - raise ValueError("invalid option for 'inclusive': %s" % inclusive) + if closed not in ['left', 'right', 'both', 'neither']: + raise ValueError("invalid option for 'closed': %s" % closed) left = np.asarray(left) right = np.asarray(right) @@ -73,7 +64,7 @@ cdef class IntervalTree(IntervalMixin): indices = np.arange(len(left), dtype='int64') - self.inclusive = inclusive + self.closed = closed # GH 23352: ensure no nan in nodes mask = ~np.isnan(self.left) @@ -82,7 +73,7 @@ cdef class IntervalTree(IntervalMixin): self.right = self.right[mask] indices = indices[mask] - node_cls = NODE_CLASSES[str(self.dtype), inclusive] + node_cls = NODE_CLASSES[str(self.dtype), closed] self.root = node_cls(self.left, self.right, indices, leaf_size) @property @@ -110,8 +101,8 @@ cdef class IntervalTree(IntervalMixin): if self._is_overlapping is not None: return self._is_overlapping - # <= when inclusive on both sides since endpoints can overlap - op = le if self.inclusive == 'both' else lt + # <= when both sides closed since endpoints can overlap + op = le if self.closed == 'both' else lt # overlap if start of current interval < end of previous interval # (current and previous in terms of sorted order by left/start side) @@ -189,9 +180,9 @@ cdef class IntervalTree(IntervalMixin): missing.to_array().astype('intp')) def __repr__(self) -> str: - return ('<IntervalTree[{dtype},{inclusive}]: ' + return ('<IntervalTree[{dtype},{closed}]: ' '{n_elements} elements>'.format( - dtype=self.dtype, inclusive=self.inclusive, + dtype=self.dtype, closed=self.closed, n_elements=self.root.n_elements)) # compat with IndexEngine interface @@ -254,13 +245,13 @@ cdef class IntervalNode: # we need specialized nodes and leaves to optimize for different dtype and -# inclusive values +# closed values {{py: nodes = [] for dtype in ['float64', 'int64', 'uint64']: - for inclusive, cmp_left, cmp_right in [ + for closed, cmp_left, cmp_right in [ ('left', '<=', '<'), ('right', '<', '<='), ('both', '<=', '<='), @@ -274,7 +265,7 @@ for dtype in ['float64', 'int64', 'uint64']: elif dtype.startswith('float'): fused_prefix = '' nodes.append((dtype, dtype.title(), - inclusive, inclusive.title(), + closed, closed.title(), cmp_left, cmp_right, cmp_left_converse, diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 8e4b23f32f48c..65677bbdb0ea9 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2164,7 +2164,7 @@ cpdef bint is_interval_array(ndarray values): """ cdef: Py_ssize_t i, n = len(values) - str inclusive = None + str closed = None bint numeric = False bint dt64 = False bint td64 = False @@ -2177,15 +2177,15 @@ cpdef bint is_interval_array(ndarray values): val = values[i] if is_interval(val): - if inclusive is None: - inclusive = val.inclusive + if closed is None: + closed = val.closed numeric = ( util.is_float_object(val.left) or util.is_integer_object(val.left) ) td64 = is_timedelta(val.left) dt64 = PyDateTime_Check(val.left) - elif val.inclusive != inclusive: + elif val.closed != closed: # mismatched closedness return False elif numeric: @@ -2208,7 +2208,7 @@ cpdef bint is_interval_array(ndarray values): else: return False - if inclusive is None: + if closed is None: # we saw all-NAs, no actual Intervals return False return True diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 5e0af3c0bc07d..945639ef4b00a 100644 --- a/pandas/_testing/asserters.py +++ 
b/pandas/_testing/asserters.py @@ -607,7 +607,7 @@ def assert_interval_array_equal( assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs) - assert_attr_equal("inclusive", left, right, obj=obj) + assert_attr_equal("closed", left, right, obj=obj) def assert_period_array_equal(left, right, obj="PeriodArray") -> None: diff --git a/pandas/_typing.py b/pandas/_typing.py index 88d826ec454b2..08096569c61a7 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -313,7 +313,7 @@ def closed(self) -> bool: # Interval closed type IntervalLeftRight = Literal["left", "right"] -IntervalInclusiveType = Union[IntervalLeftRight, Literal["both", "neither"]] +IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]] # datetime and NaTType DatetimeNaTType = Union[datetime, "NaTType"] diff --git a/pandas/conftest.py b/pandas/conftest.py index 54c24b4c0b58a..5f7b6d509c233 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -620,7 +620,7 @@ def _create_mi_with_dt64tz_level(): "bool-object": tm.makeBoolIndex(10).astype(object), "bool-dtype": Index(np.random.randn(10) < 0), "categorical": tm.makeCategoricalIndex(100), - "interval": tm.makeIntervalIndex(100, inclusive="right"), + "interval": tm.makeIntervalIndex(100), "empty": Index([]), "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])), "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(), @@ -933,14 +933,8 @@ def rand_series_with_duplicate_datetimeindex() -> Series: # ---------------------------------------------------------------- @pytest.fixture( params=[ - ( - Interval(left=0, right=5, inclusive="right"), - IntervalDtype("int64", inclusive="right"), - ), - ( - Interval(left=0.1, right=0.5, inclusive="right"), - IntervalDtype("float64", inclusive="right"), - ), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")), (Period("2012-01", freq="M"), "period[M]"), (Period("2012-02-01", freq="D"), "period[D]"), ( diff --git a/pandas/core/arrays/arrow/extension_types.py b/pandas/core/arrays/arrow/extension_types.py index a2b3c6d4da080..c9badb2bd305b 100644 --- a/pandas/core/arrays/arrow/extension_types.py +++ b/pandas/core/arrays/arrow/extension_types.py @@ -1,15 +1,12 @@ from __future__ import annotations import json -import warnings import pyarrow -from pandas._typing import IntervalInclusiveType -from pandas.util._decorators import deprecate_kwarg -from pandas.util._exceptions import find_stack_level +from pandas._typing import IntervalClosedType -from pandas.core.arrays.interval import VALID_INCLUSIVE +from pandas.core.arrays.interval import VALID_CLOSED class ArrowPeriodType(pyarrow.ExtensionType): @@ -53,12 +50,11 @@ def to_pandas_dtype(self): class ArrowIntervalType(pyarrow.ExtensionType): - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") - def __init__(self, subtype, inclusive: IntervalInclusiveType) -> None: + def __init__(self, subtype, closed: IntervalClosedType) -> None: # attributes need to be set first before calling # super init (as that calls serialize) - assert inclusive in VALID_INCLUSIVE - self._inclusive: IntervalInclusiveType = inclusive + assert closed in VALID_CLOSED + self._closed: IntervalClosedType = closed if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) self._subtype = subtype @@ -71,46 +67,37 @@ def subtype(self): return self._subtype @property - def inclusive(self) -> 
IntervalInclusiveType: - return self._inclusive - - @property - def closed(self) -> IntervalInclusiveType: - warnings.warn( - "Attribute `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(), - ) - return self._inclusive + def closed(self) -> IntervalClosedType: + return self._closed def __arrow_ext_serialize__(self) -> bytes: - metadata = {"subtype": str(self.subtype), "inclusive": self.inclusive} + metadata = {"subtype": str(self.subtype), "closed": self.closed} return json.dumps(metadata).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType: metadata = json.loads(serialized.decode()) subtype = pyarrow.type_for_alias(metadata["subtype"]) - inclusive = metadata["inclusive"] - return ArrowIntervalType(subtype, inclusive) + closed = metadata["closed"] + return ArrowIntervalType(subtype, closed) def __eq__(self, other): if isinstance(other, pyarrow.BaseExtensionType): return ( type(self) == type(other) and self.subtype == other.subtype - and self.inclusive == other.inclusive + and self.closed == other.closed ) else: return NotImplemented def __hash__(self) -> int: - return hash((str(self), str(self.subtype), self.inclusive)) + return hash((str(self), str(self.subtype), self.closed)) def to_pandas_dtype(self): import pandas as pd - return pd.IntervalDtype(self.subtype.to_pandas_dtype(), self.inclusive) + return pd.IntervalDtype(self.subtype.to_pandas_dtype(), self.closed) # register the type with a dummy instance diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index e6def0f4d9402..8f01dfaf867e7 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -1,6 +1,5 @@ from __future__ import annotations -import inspect import operator from operator import ( le, @@ -16,7 +15,6 @@ cast, overload, ) -import warnings import numpy as np @@ -24,7 +22,7 @@ from pandas._libs import lib from pandas._libs.interval import ( - VALID_INCLUSIVE, + VALID_CLOSED, Interval, IntervalMixin, intervals_to_interval_bounds, @@ -33,7 +31,7 @@ from pandas._typing import ( ArrayLike, Dtype, - IntervalInclusiveType, + IntervalClosedType, NpDtype, PositionalIndexer, ScalarIndexer, @@ -44,10 +42,8 @@ from pandas.errors import IntCastingNaNError from pandas.util._decorators import ( Appender, - deprecate_kwarg, deprecate_nonkeyword_arguments, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import LossySetitemError from pandas.core.dtypes.common import ( @@ -130,8 +126,8 @@ data : array-like (1-dimensional) Array-like containing Interval objects from which to build the %(klass)s. -inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are inclusive on the left-side, right-side, both or +closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or neither. dtype : dtype or None, default None If None, dtype will be inferred. 
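Context for the constructor hunks above and below: after this revert, ``closed`` is once again the keyword accepted by the ``IntervalArray``/``IntervalIndex`` constructors and by ``IntervalDtype``, defaulting to ``'right'``. A minimal doctest-style sketch of the restored behavior (illustrative only, not part of the patch):

>>> import pandas as pd
>>> # 'closed' is restored as the constructor keyword (default 'right')
>>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="left")
<IntervalArray>
[[0, 1), [1, 2), [2, 3)]
Length: 3, dtype: interval[int64, left]
>>> # the dtype spelling matches the array's closed side
>>> pd.IntervalDtype("int64", closed="left")
interval[int64, left]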
@@ -145,7 +141,7 @@ ---------- left right -inclusive +closed mid length is_empty @@ -160,7 +156,6 @@ contains overlaps set_closed -set_inclusive to_tuples %(extra_methods)s\ @@ -186,8 +181,7 @@ _interval_shared_docs["class"] % { "klass": "IntervalArray", - "summary": "Pandas array for interval data that are inclusive on the same " - "side.", + "summary": "Pandas array for interval data that are closed on the same side.", "versionadded": "0.24.0", "name": "", "extra_attributes": "", @@ -199,8 +193,7 @@ A new ``IntervalArray`` can be constructed directly from an array-like of ``Interval`` objects: - >>> pd.arrays.IntervalArray([pd.Interval(0, 1, "right"), - ... pd.Interval(1, 5, "right")]) + >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) <IntervalArray> [(0, 1], (1, 5]] Length: 2, dtype: interval[int64, right] @@ -228,11 +221,10 @@ def ndim(self) -> Literal[1]: # --------------------------------------------------------------------- # Constructors - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def __new__( cls: type[IntervalArrayT], data, - inclusive: IntervalInclusiveType | None = None, + closed=None, dtype: Dtype | None = None, copy: bool = False, verify_integrity: bool = True, @@ -243,7 +235,7 @@ def __new__( if isinstance(data, cls): left = data._left right = data._right - inclusive = inclusive or data.inclusive + closed = closed or data.closed else: # don't allow scalars @@ -256,41 +248,39 @@ def __new__( # might need to convert empty or purely na data data = _maybe_convert_platform_interval(data) - left, right, infer_inclusive = intervals_to_interval_bounds( - data, validate_inclusive=inclusive is None + left, right, infer_closed = intervals_to_interval_bounds( + data, validate_closed=closed is None ) if left.dtype == object: left = lib.maybe_convert_objects(left) right = lib.maybe_convert_objects(right) - inclusive = inclusive or infer_inclusive + closed = closed or infer_closed return cls._simple_new( left, right, - inclusive=inclusive, + closed, copy=copy, dtype=dtype, verify_integrity=verify_integrity, ) @classmethod - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def _simple_new( cls: type[IntervalArrayT], left, right, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType | None = None, copy: bool = False, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> IntervalArrayT: result = IntervalMixin.__new__(cls) - if inclusive is None and isinstance(dtype, IntervalDtype): - inclusive = dtype.inclusive - - inclusive = inclusive or "right" + if closed is None and isinstance(dtype, IntervalDtype): + closed = dtype.closed + closed = closed or "right" left = ensure_index(left, copy=copy) right = ensure_index(right, copy=copy) @@ -305,11 +295,12 @@ def _simple_new( else: msg = f"dtype must be an IntervalDtype, got {dtype}" raise TypeError(msg) - if dtype.inclusive is None: + + if dtype.closed is None: # possibly loading an old pickle - dtype = IntervalDtype(dtype.subtype, inclusive) - elif inclusive != dtype.inclusive: - raise ValueError("inclusive keyword does not match dtype.inclusive") + dtype = IntervalDtype(dtype.subtype, closed) + elif closed != dtype.closed: + raise ValueError("closed keyword does not match dtype.closed") # coerce dtypes to match if needed if is_float_dtype(left) and is_integer_dtype(right): @@ -352,7 +343,7 @@ def _simple_new( # If these share data, then setitem could corrupt our IA right = right.copy() - dtype = IntervalDtype(left.dtype, inclusive=inclusive) + dtype = 
IntervalDtype(left.dtype, closed=closed) result._dtype = dtype result._left = left @@ -380,7 +371,7 @@ def _from_factorized( # a new IA from an (empty) object-dtype array, so turn it into the # correct dtype. values = values.astype(original.dtype.subtype) - return cls(values, inclusive=original.inclusive) + return cls(values, closed=original.closed) _interval_shared_docs["from_breaks"] = textwrap.dedent( """ @@ -390,8 +381,8 @@ def _from_factorized( ---------- breaks : array-like (1-dimensional) Left and right bounds for each interval. - inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are inclusive on the left-side, right-side, both + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or neither. copy : bool, default False Copy the data. @@ -421,7 +412,7 @@ def _from_factorized( """\ Examples -------- - >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], "right") + >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3]) <IntervalArray> [(0, 1], (1, 2], (2, 3]] Length: 3, dtype: interval[int64, right] @@ -429,22 +420,16 @@ def _from_factorized( ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_breaks( cls: type[IntervalArrayT], breaks, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType | None = "right", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: - if inclusive is None: - inclusive = "right" - breaks = _maybe_convert_platform_interval(breaks) - return cls.from_arrays( - breaks[:-1], breaks[1:], inclusive, copy=copy, dtype=dtype - ) + return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype) _interval_shared_docs["from_arrays"] = textwrap.dedent( """ @@ -456,8 +441,8 @@ def from_breaks( Left bounds for each interval. right : array-like (1-dimensional) Right bounds for each interval. - inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are inclusive on the left-side, right-side, both + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or neither. copy : bool, default False Copy the data. @@ -502,7 +487,7 @@ def from_breaks( "klass": "IntervalArray", "examples": textwrap.dedent( """\ - >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], inclusive="right") + >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3]) <IntervalArray> [(0, 1], (1, 2], (2, 3]] Length: 3, dtype: interval[int64, right] @@ -510,29 +495,19 @@ def from_breaks( ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_arrays( cls: type[IntervalArrayT], left, right, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType | None = "right", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: - - if inclusive is None: - inclusive = "right" - left = _maybe_convert_platform_interval(left) right = _maybe_convert_platform_interval(right) return cls._simple_new( - left, - right, - inclusive=inclusive, - copy=copy, - dtype=dtype, - verify_integrity=True, + left, right, closed, copy=copy, dtype=dtype, verify_integrity=True ) _interval_shared_docs["from_tuples"] = textwrap.dedent( @@ -543,8 +518,8 @@ def from_arrays( ---------- data : array-like (1-dimensional) Array of tuples. 
- inclusive : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are inclusive on the left-side, right-side, both + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both or neither. copy : bool, default False By-default copy the data, this is compat only and ignored. @@ -576,7 +551,7 @@ def from_arrays( """\ Examples -------- - >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)], inclusive="right") + >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)]) <IntervalArray> [(0, 1], (1, 2]] Length: 2, dtype: interval[int64, right] @@ -584,17 +559,13 @@ def from_arrays( ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_tuples( cls: type[IntervalArrayT], data, - inclusive: IntervalInclusiveType | None = None, + closed="right", copy: bool = False, dtype: Dtype | None = None, ) -> IntervalArrayT: - if inclusive is None: - inclusive = "right" - if len(data): left, right = [], [] else: @@ -618,7 +589,7 @@ def from_tuples( left.append(lhs) right.append(rhs) - return cls.from_arrays(left, right, inclusive, copy=False, dtype=dtype) + return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) def _validate(self): """ @@ -626,13 +597,13 @@ def _validate(self): Checks that - * inclusive is valid + * closed is valid * left and right match lengths * left and right have the same missing values * left is always below right """ - if self.inclusive not in VALID_INCLUSIVE: - msg = f"invalid option for 'inclusive': {self.inclusive}" + if self.closed not in VALID_CLOSED: + msg = f"invalid option for 'closed': {self.closed}" raise ValueError(msg) if len(self._left) != len(self._right): msg = "left and right must have the same length" @@ -660,9 +631,7 @@ def _shallow_copy(self: IntervalArrayT, left, right) -> IntervalArrayT: right : Index Values to be used for the right-side of the intervals. 
""" - return self._simple_new( - left, right, inclusive=self.inclusive, verify_integrity=False - ) + return self._simple_new(left, right, closed=self.closed, verify_integrity=False) # --------------------------------------------------------------------- # Descriptive @@ -708,7 +677,7 @@ def __getitem__( # scalar if is_scalar(left) and isna(left): return self._fill_value - return Interval(left, right, inclusive=self.inclusive) + return Interval(left, right, self.closed) if np.ndim(left) > 1: # GH#30588 multi-dimensional indexer disallowed raise ValueError("multi-dimensional indexing not allowed") @@ -747,18 +716,18 @@ def _cmp_method(self, other, op): # for categorical defer to categories for dtype other_dtype = other.categories.dtype - # extract intervals if we have interval categories with matching inclusive + # extract intervals if we have interval categories with matching closed if is_interval_dtype(other_dtype): - if self.inclusive != other.categories.inclusive: + if self.closed != other.categories.closed: return invalid_comparison(self, other, op) other = other.categories.take( other.codes, allow_fill=True, fill_value=other.categories._na_value ) - # interval-like -> need same inclusive and matching endpoints + # interval-like -> need same closed and matching endpoints if is_interval_dtype(other_dtype): - if self.inclusive != other.inclusive: + if self.closed != other.closed: return invalid_comparison(self, other, op) elif not isinstance(other, Interval): other = type(self)(other) @@ -974,7 +943,7 @@ def equals(self, other) -> bool: return False return bool( - self.inclusive == other.inclusive + self.closed == other.closed and self.left.equals(other.left) and self.right.equals(other.right) ) @@ -994,14 +963,14 @@ def _concat_same_type( ------- IntervalArray """ - inclusive_set = {interval.inclusive for interval in to_concat} - if len(inclusive_set) != 1: - raise ValueError("Intervals must all be inclusive on the same side.") - inclusive = inclusive_set.pop() + closed_set = {interval.closed for interval in to_concat} + if len(closed_set) != 1: + raise ValueError("Intervals must all be closed on the same side.") + closed = closed_set.pop() left = np.concatenate([interval.left for interval in to_concat]) right = np.concatenate([interval.right for interval in to_concat]) - return cls._simple_new(left, right, inclusive=inclusive, copy=False) + return cls._simple_new(left, right, closed=closed, copy=False) def copy(self: IntervalArrayT) -> IntervalArrayT: """ @@ -1013,9 +982,9 @@ def copy(self: IntervalArrayT) -> IntervalArrayT: """ left = self._left.copy() right = self._right.copy() - inclusive = self.inclusive + closed = self.closed # TODO: Could skip verify_integrity here. 
- return type(self).from_arrays(left, right, inclusive=inclusive) + return type(self).from_arrays(left, right, closed=closed) def isna(self) -> np.ndarray: return isna(self._left) @@ -1037,7 +1006,7 @@ def shift(self, periods: int = 1, fill_value: object = None) -> IntervalArray: from pandas import Index fill_value = Index(self._left, copy=False)._na_value - empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1), "right") + empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) else: empty = self._from_sequence([fill_value] * empty_len) @@ -1122,7 +1091,7 @@ def _validate_listlike(self, value): # list-like of intervals try: array = IntervalArray(value) - self._check_inclusive_matches(array, name="value") + self._check_closed_matches(array, name="value") value_left, value_right = array.left, array.right except TypeError as err: # wrong type: not interval or NA @@ -1142,7 +1111,7 @@ def _validate_listlike(self, value): def _validate_scalar(self, value): if isinstance(value, Interval): - self._check_inclusive_matches(value, name="value") + self._check_closed_matches(value, name="value") left, right = value.left, value.right # TODO: check subdtype match like _validate_setitem_value? elif is_valid_na_for_dtype(value, self.left.dtype): @@ -1167,8 +1136,8 @@ def _validate_setitem_value(self, value): value_left, value_right = value, value elif isinstance(value, Interval): - # scalar - self._check_inclusive_matches(value, name="value") + # scalar interval + self._check_closed_matches(value, name="value") value_left, value_right = value.left, value.right self.left._validate_fill_value(value_left) self.left._validate_fill_value(value_right) @@ -1292,7 +1261,7 @@ def mid(self) -> Index: """ Check elementwise if an Interval overlaps the values in the %(klass)s. - Two intervals overlap if they share a common point, including inclusive + Two intervals overlap if they share a common point, including closed endpoints. Intervals that only have an open endpoint in common do not overlap. 
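The ``overlaps`` docstring above relies on the rule that a shared closed endpoint counts as an overlap while a shared open endpoint does not. A quick doctest-style check of that rule, assuming the post-revert ``closed`` spelling (illustrative only, not part of the patch):

>>> import pandas as pd
>>> # [0, 1] and [1, 2] share the closed point 1 -> overlap
>>> pd.Interval(0, 1, closed="both").overlaps(pd.Interval(1, 2, closed="both"))
True
>>> # [0, 1) and (1, 2] have only the open endpoint 1 in common -> no overlap
>>> pd.Interval(0, 1, closed="left").overlaps(pd.Interval(1, 2, closed="right"))
False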
@@ -1316,14 +1285,14 @@ def mid(self) -> Index: >>> intervals.overlaps(pd.Interval(0.5, 1.5)) array([ True, True, False]) - Intervals that share inclusive endpoints overlap: + Intervals that share closed endpoints overlap: - >>> intervals.overlaps(pd.Interval(1, 3, inclusive='left')) + >>> intervals.overlaps(pd.Interval(1, 3, closed='left')) array([ True, True, True]) Intervals that only have an open endpoint in common do not overlap: - >>> intervals.overlaps(pd.Interval(1, 2, inclusive='right')) + >>> intervals.overlaps(pd.Interval(1, 2, closed='right')) array([False, True, False]) """ ) @@ -1335,7 +1304,7 @@ def mid(self) -> Index: "examples": textwrap.dedent( """\ >>> data = [(0, 1), (1, 3), (2, 4)] - >>> intervals = pd.arrays.IntervalArray.from_tuples(data, "right") + >>> intervals = pd.arrays.IntervalArray.from_tuples(data) >>> intervals <IntervalArray> [(0, 1], (1, 3], (2, 4]] @@ -1351,7 +1320,7 @@ def overlaps(self, other): msg = f"`other` must be Interval-like, got {type(other).__name__}" raise TypeError(msg) - # equality is okay if both endpoints are inclusive (overlap at a point) + # equality is okay if both endpoints are closed (overlap at a point) op1 = le if (self.closed_left and other.closed_right) else lt op2 = le if (other.closed_left and self.closed_right) else lt @@ -1363,34 +1332,18 @@ def overlaps(self, other): # --------------------------------------------------------------------- @property - def inclusive(self) -> IntervalInclusiveType: + def closed(self) -> IntervalClosedType: """ String describing the inclusive side the intervals. Either ``left``, ``right``, ``both`` or ``neither``. """ - return self.dtype.inclusive - - @property - def closed(self) -> IntervalInclusiveType: - """ - String describing the inclusive side the intervals. - - Either ``left``, ``right``, ``both`` or ``neither`. - """ - warnings.warn( - "Attribute `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - return self.dtype.inclusive + return self.dtype.closed _interval_shared_docs["set_closed"] = textwrap.dedent( """ Return an identical %(klass)s closed on the specified side. - .. deprecated:: 1.5.0 - Parameters ---------- closed : {'left', 'right', 'both', 'neither'} @@ -1413,7 +1366,7 @@ def closed(self) -> IntervalInclusiveType: """\ Examples -------- - >>> index = pd.arrays.IntervalArray.from_breaks(range(4), "right") + >>> index = pd.arrays.IntervalArray.from_breaks(range(4)) >>> index <IntervalArray> [(0, 1], (1, 2], (2, 3]] @@ -1426,70 +1379,13 @@ def closed(self) -> IntervalInclusiveType: ), } ) - def set_closed( - self: IntervalArrayT, closed: IntervalInclusiveType - ) -> IntervalArrayT: - warnings.warn( - "set_closed is deprecated and will be removed in a future version. " - "Use set_inclusive instead.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - return self.set_inclusive(closed) - - _interval_shared_docs["set_inclusive"] = textwrap.dedent( - """ - Return an identical %(klass)s but closed on the specified side. - - .. versionadded:: 1.5 - - Parameters - ---------- - inclusive : {'left', 'right', 'both', 'neither'} - Whether the intervals are closed on the left-side, right-side, both - or neither. 
- - Returns - ------- - new_index : %(klass)s - - %(examples)s\ - """ - ) - - @Appender( - _interval_shared_docs["set_inclusive"] - % { - "klass": "IntervalArray", - "examples": textwrap.dedent( - """\ - Examples - -------- - >>> index = pd.arrays.IntervalArray.from_breaks(range(4), "right") - >>> index - <IntervalArray> - [(0, 1], (1, 2], (2, 3]] - Length: 3, dtype: interval[int64, right] - >>> index.set_inclusive('both') - <IntervalArray> - [[0, 1], [1, 2], [2, 3]] - Length: 3, dtype: interval[int64, both] - """ - ), - } - ) - def set_inclusive( - self: IntervalArrayT, inclusive: IntervalInclusiveType - ) -> IntervalArrayT: - if inclusive not in VALID_INCLUSIVE: - msg = f"invalid option for 'inclusive': {inclusive}" + def set_closed(self: IntervalArrayT, closed: IntervalClosedType) -> IntervalArrayT: + if closed not in VALID_CLOSED: + msg = f"invalid option for 'closed': {closed}" raise ValueError(msg) return type(self)._simple_new( - left=self._left, - right=self._right, - inclusive=inclusive, - verify_integrity=False, + left=self._left, right=self._right, closed=closed, verify_integrity=False ) _interval_shared_docs[ @@ -1512,15 +1408,15 @@ def is_non_overlapping_monotonic(self) -> bool: # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) # we already require left <= right - # strict inequality for inclusive == 'both'; equality implies overlapping + # strict inequality for closed == 'both'; equality implies overlapping # at a point when both sides of intervals are included - if self.inclusive == "both": + if self.closed == "both": return bool( (self._right[:-1] < self._left[1:]).all() or (self._left[:-1] > self._right[1:]).all() ) - # non-strict inequality when inclusive != 'both'; at least one side is + # non-strict inequality when closed != 'both'; at least one side is # not included in the intervals, so equality does not imply overlapping return bool( (self._right[:-1] <= self._left[1:]).all() @@ -1538,14 +1434,14 @@ def __array__(self, dtype: NpDtype | None = None) -> np.ndarray: left = self._left right = self._right mask = self.isna() - inclusive = self.inclusive + closed = self.closed result = np.empty(len(left), dtype=object) for i in range(len(left)): if mask[i]: result[i] = np.nan else: - result[i] = Interval(left[i], right[i], inclusive=inclusive) + result[i] = Interval(left[i], right[i], closed) return result def __arrow_array__(self, type=None): @@ -1563,7 +1459,7 @@ def __arrow_array__(self, type=None): f"Conversion to arrow with subtype '{self.dtype.subtype}' " "is not supported" ) from err - interval_type = ArrowIntervalType(subtype, self.inclusive) + interval_type = ArrowIntervalType(subtype, self.closed) storage_array = pyarrow.StructArray.from_arrays( [ pyarrow.array(self._left, type=subtype, from_pandas=True), @@ -1586,13 +1482,12 @@ def __arrow_array__(self, type=None): if type.equals(interval_type.storage_type): return storage_array elif isinstance(type, ArrowIntervalType): - # ensure we have the same subtype and inclusive attributes + # ensure we have the same subtype and closed attributes if not type.equals(interval_type): raise TypeError( "Not supported to convert IntervalArray to type with " f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) " - f"and 'inclusive' ({self.inclusive} vs {type.inclusive}) " - f"attributes" + f"and 'closed' ({self.closed} vs {type.closed}) attributes" ) else: raise TypeError( @@ -1720,8 +1615,7 @@ def repeat( "klass": "IntervalArray", "examples": textwrap.dedent( """\ - >>> intervals = 
pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)] - ... , "right") + >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)]) >>> intervals <IntervalArray> [(0, 1], (1, 3], (2, 4]] @@ -1744,7 +1638,7 @@ def isin(self, values) -> npt.NDArray[np.bool_]: values = extract_array(values, extract_numpy=True) if is_interval_dtype(values.dtype): - if self.inclusive != values.inclusive: + if self.closed != values.closed: # not comparable -> no overlap return np.zeros(self.shape, dtype=bool) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 3dfc544273a64..4244217da7865 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -516,7 +516,7 @@ def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: elif isinstance(dtype, IntervalDtype): # TODO(GH#45349): don't special-case IntervalDtype, allow # overriding instead of returning object below. - return IntervalDtype(np.float64, inclusive=dtype.inclusive) + return IntervalDtype(np.float64, closed=dtype.closed) return _dtype_obj elif dtype.kind == "b": return _dtype_obj @@ -841,7 +841,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj, dtype = PeriodDtype(freq=val.freq) elif lib.is_interval(val): subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0] - dtype = IntervalDtype(subtype=subtype, inclusive=val.inclusive) + dtype = IntervalDtype(subtype=subtype, closed=val.closed) return dtype, val diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index f0e4a54c3f05c..606fefd30f37f 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -481,7 +481,7 @@ def is_interval_dtype(arr_or_dtype) -> bool: >>> is_interval_dtype([1, 2, 3]) False >>> - >>> interval = pd.Interval(1, 2, inclusive="right") + >>> interval = pd.Interval(1, 2, closed="right") >>> is_interval_dtype(interval) False >>> is_interval_dtype(pd.IntervalIndex([interval])) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index c2c600adbbe09..e2570e6be4879 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -3,7 +3,6 @@ """ from __future__ import annotations -import inspect import re from typing import ( TYPE_CHECKING, @@ -11,19 +10,12 @@ MutableMapping, cast, ) -import warnings import numpy as np import pytz -from pandas._libs import ( - lib, - missing as libmissing, -) -from pandas._libs.interval import ( - Interval, - _warning_interval, -) +from pandas._libs import missing as libmissing +from pandas._libs.interval import Interval from pandas._libs.properties import cache_readonly from pandas._libs.tslibs import ( BaseOffset, @@ -39,12 +31,10 @@ from pandas._typing import ( Dtype, DtypeObj, - IntervalInclusiveType, Ordered, npt, type_t, ) -from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ( ExtensionDtype, @@ -1072,7 +1062,7 @@ class IntervalDtype(PandasExtensionDtype): Examples -------- - >>> pd.IntervalDtype(subtype='int64', inclusive='both') + >>> pd.IntervalDtype(subtype='int64', closed='both') interval[int64, both] """ @@ -1083,44 +1073,30 @@ class IntervalDtype(PandasExtensionDtype): num = 103 _metadata = ( "subtype", - "inclusive", + "closed", ) _match = re.compile( r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)" - r"(, (?P<inclusive>(right|left|both|neither)))?\]" + r"(, (?P<closed>(right|left|both|neither)))?\]" ) _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} - def __new__( - cls, - subtype=None, - inclusive: IntervalInclusiveType | 
None = None, - closed: None | lib.NoDefault = lib.no_default, - ): + def __new__(cls, subtype=None, closed: str_type | None = None): from pandas.core.dtypes.common import ( is_string_dtype, pandas_dtype, ) - inclusive, closed = _warning_interval(inclusive, closed) - - if inclusive is not None and inclusive not in { - "right", - "left", - "both", - "neither", - }: - raise ValueError( - "inclusive must be one of 'right', 'left', 'both', 'neither'" - ) + if closed is not None and closed not in {"right", "left", "both", "neither"}: + raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'") if isinstance(subtype, IntervalDtype): - if inclusive is not None and inclusive != subtype.inclusive: + if closed is not None and closed != subtype.closed: raise ValueError( - "dtype.inclusive and 'inclusive' do not match. " - "Try IntervalDtype(dtype.subtype, inclusive) instead." + "dtype.closed and 'closed' do not match. " + "Try IntervalDtype(dtype.subtype, closed) instead." ) return subtype elif subtype is None: @@ -1128,7 +1104,7 @@ def __new__( # generally for pickle compat u = object.__new__(cls) u._subtype = None - u._inclusive = inclusive + u._closed = closed return u elif isinstance(subtype, str) and subtype.lower() == "interval": subtype = None @@ -1138,18 +1114,14 @@ def __new__( if m is not None: gd = m.groupdict() subtype = gd["subtype"] - if gd.get("inclusive", None) is not None: - if inclusive is not None: - if inclusive != gd["inclusive"]: + if gd.get("closed", None) is not None: + if closed is not None: + if closed != gd["closed"]: raise ValueError( - "'inclusive' keyword does not match value " + "'closed' keyword does not match value " "specified in dtype string" ) - # Incompatible types in assignment (expression has type - # "Union[str, Any]", variable has type - # "Optional[Union[Literal['left', 'right'], - # Literal['both', 'neither']]]") - inclusive = gd["inclusive"] # type: ignore[assignment] + closed = gd["closed"] try: subtype = pandas_dtype(subtype) @@ -1164,13 +1136,13 @@ def __new__( ) raise TypeError(msg) - key = str(subtype) + str(inclusive) + key = str(subtype) + str(closed) try: return cls._cache_dtypes[key] except KeyError: u = object.__new__(cls) u._subtype = subtype - u._inclusive = inclusive + u._closed = closed cls._cache_dtypes[key] = u return u @@ -1186,18 +1158,9 @@ def _can_hold_na(self) -> bool: return False return True - @property - def inclusive(self): - return self._inclusive - @property def closed(self): - warnings.warn( - "Attribute `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - return self._inclusive + return self._closed @property def subtype(self): @@ -1248,10 +1211,10 @@ def type(self) -> type[Interval]: def __str__(self) -> str_type: if self.subtype is None: return "interval" - if self.inclusive is None: + if self.closed is None: # Only partially initialized GH#38394 return f"interval[{self.subtype}]" - return f"interval[{self.subtype}, {self.inclusive}]" + return f"interval[{self.subtype}, {self.closed}]" def __hash__(self) -> int: # make myself hashable @@ -1265,7 +1228,7 @@ def __eq__(self, other: Any) -> bool: elif self.subtype is None or other.subtype is None: # None should match any subtype return True - elif self.inclusive != other.inclusive: + elif self.closed != other.closed: return False else: from pandas.core.dtypes.common import is_dtype_equal @@ -1277,8 +1240,9 @@ def __setstate__(self, state) -> None: # PandasExtensionDtype superclass and uses 
the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._subtype = state["subtype"] - # backward-compat older pickles won't have "inclusive" key - self._inclusive = state.pop("inclusive", None) + + # backward-compat older pickles won't have "closed" key + self._closed = state.pop("closed", None) @classmethod def is_dtype(cls, dtype: object) -> bool: @@ -1320,14 +1284,14 @@ def __from_arrow__( arr = arr.storage left = np.asarray(arr.field("left"), dtype=self.subtype) right = np.asarray(arr.field("right"), dtype=self.subtype) - iarr = IntervalArray.from_arrays(left, right, inclusive=self.inclusive) + iarr = IntervalArray.from_arrays(left, right, closed=self.closed) results.append(iarr) if not results: return IntervalArray.from_arrays( np.array([], dtype=self.subtype), np.array([], dtype=self.subtype), - inclusive=self.inclusive, + closed=self.closed, ) return IntervalArray._concat_same_type(results) @@ -1335,8 +1299,8 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if not all(isinstance(x, IntervalDtype) for x in dtypes): return None - inclusive = cast("IntervalDtype", dtypes[0]).inclusive - if not all(cast("IntervalDtype", x).inclusive == inclusive for x in dtypes): + closed = cast("IntervalDtype", dtypes[0]).closed + if not all(cast("IntervalDtype", x).closed == closed for x in dtypes): return np.dtype(object) from pandas.core.dtypes.cast import find_common_type @@ -1344,7 +1308,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes]) if common == object: return np.dtype(object) - return IntervalDtype(common, inclusive=inclusive) + return IntervalDtype(common, closed=closed) class PandasDtype(ExtensionDtype): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7ed6e0d84445c..5686ae324dd51 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -56,7 +56,7 @@ IgnoreRaise, IndexKeyFunc, IndexLabel, - IntervalInclusiveType, + IntervalClosedType, JSONSerializable, Level, Manager, @@ -8257,7 +8257,7 @@ def between_time( end_time, include_start: bool_t | lib.NoDefault = lib.no_default, include_end: bool_t | lib.NoDefault = lib.no_default, - inclusive: IntervalInclusiveType | None = None, + inclusive: IntervalClosedType | None = None, axis=None, ) -> NDFrameT: """ @@ -8363,7 +8363,7 @@ def between_time( left = True if include_start is lib.no_default else include_start right = True if include_end is lib.no_default else include_end - inc_dict: dict[tuple[bool_t, bool_t], IntervalInclusiveType] = { + inc_dict: dict[tuple[bool_t, bool_t], IntervalClosedType] = { (True, True): "both", (True, False): "left", (False, True): "right", diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 835d6a3948724..c3892c8b2e0de 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -37,7 +37,7 @@ from pandas._typing import ( Dtype, DtypeObj, - IntervalInclusiveType, + IntervalClosedType, IntervalLeftRight, npt, ) @@ -926,7 +926,7 @@ def date_range( normalize: bool = False, name: Hashable = None, closed: Literal["left", "right"] | None | lib.NoDefault = lib.no_default, - inclusive: IntervalInclusiveType | None = None, + inclusive: IntervalClosedType | None = None, **kwargs, ) -> DatetimeIndex: """ @@ -1132,7 +1132,7 @@ def bdate_range( weekmask=None, holidays=None, closed: IntervalLeftRight | lib.NoDefault | None = lib.no_default, - inclusive: 
IntervalInclusiveType | None = None, + inclusive: IntervalClosedType | None = None, **kwargs, ) -> DatetimeIndex: """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 5ed8f79bbbefe..e686e8453f0d9 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,7 +1,6 @@ """ define the IntervalIndex """ from __future__ import annotations -import inspect from operator import ( le, lt, @@ -12,7 +11,6 @@ Hashable, Literal, ) -import warnings import numpy as np @@ -31,19 +29,15 @@ from pandas._typing import ( Dtype, DtypeObj, - IntervalInclusiveType, + IntervalClosedType, npt, ) from pandas.errors import InvalidIndexError from pandas.util._decorators import ( Appender, cache_readonly, - deprecate_kwarg, -) -from pandas.util._exceptions import ( - find_stack_level, - rewrite_exception, ) +from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import ( find_common_type, @@ -154,7 +148,7 @@ def _new_IntervalIndex(cls, d): _interval_shared_docs["class"] % { "klass": "IntervalIndex", - "summary": "Immutable index of intervals that are inclusive on the same side.", + "summary": "Immutable index of intervals that are closed on the same side.", "name": _index_doc_kwargs["name"], "versionadded": "0.20.0", "extra_attributes": "is_overlapping\nvalues\n", @@ -166,7 +160,7 @@ def _new_IntervalIndex(cls, d): A new ``IntervalIndex`` is typically constructed using :func:`interval_range`: - >>> pd.interval_range(start=0, end=5, inclusive="right") + >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') @@ -180,7 +174,7 @@ def _new_IntervalIndex(cls, d): ), } ) -@inherit_names(["set_closed", "set_inclusive", "to_tuples"], IntervalArray, wrap=True) +@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True) @inherit_names( [ "__array__", @@ -194,12 +188,12 @@ def _new_IntervalIndex(cls, d): ], IntervalArray, ) -@inherit_names(["is_non_overlapping_monotonic", "inclusive"], IntervalArray, cache=True) +@inherit_names(["is_non_overlapping_monotonic", "closed"], IntervalArray, cache=True) class IntervalIndex(ExtensionIndex): _typ = "intervalindex" # annotate properties pinned via inherit_names - inclusive: IntervalInclusiveType + closed: IntervalClosedType is_non_overlapping_monotonic: bool closed_left: bool closed_right: bool @@ -214,11 +208,10 @@ class IntervalIndex(ExtensionIndex): # -------------------------------------------------------------------- # Constructors - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def __new__( cls, data, - inclusive: IntervalInclusiveType | None = None, + closed=None, dtype: Dtype | None = None, copy: bool = False, name: Hashable = None, @@ -230,7 +223,7 @@ def __new__( with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray( data, - inclusive=inclusive, + closed=closed, copy=copy, dtype=dtype, verify_integrity=verify_integrity, @@ -238,15 +231,6 @@ def __new__( return cls._simple_new(array, name) - @property - def closed(self): - warnings.warn( - "Attribute `closed` is deprecated in favor of `inclusive`.", - FutureWarning, - stacklevel=find_stack_level(inspect.currentframe()), - ) - return self.inclusive - @classmethod @Appender( _interval_shared_docs["from_breaks"] @@ -256,29 +240,24 @@ def closed(self): """\ Examples -------- - >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3], "right") + >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) IntervalIndex([(0, 1], (1, 2], (2, 3]], 
dtype='interval[int64, right]') """ ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_breaks( cls, breaks, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType | None = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: - - if inclusive is None: - inclusive = "right" - with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_breaks( - breaks, inclusive=inclusive, copy=copy, dtype=dtype + breaks, closed=closed, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) @@ -291,30 +270,25 @@ def from_breaks( """\ Examples -------- - >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3], "right") + >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval[int64, right]') """ ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_arrays( cls, left, right, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: - - if inclusive is None: - inclusive = "right" - with rewrite_exception("IntervalArray", cls.__name__): array = IntervalArray.from_arrays( - left, right, inclusive, copy=copy, dtype=dtype + left, right, closed, copy=copy, dtype=dtype ) return cls._simple_new(array, name=name) @@ -327,30 +301,23 @@ def from_arrays( """\ Examples -------- - >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)], "right") + >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') """ ), } ) - @deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def from_tuples( cls, data, - inclusive: IntervalInclusiveType | None = None, + closed: str = "right", name: Hashable = None, copy: bool = False, dtype: Dtype | None = None, ) -> IntervalIndex: - - if inclusive is None: - inclusive = "right" - with rewrite_exception("IntervalArray", cls.__name__): - arr = IntervalArray.from_tuples( - data, inclusive=inclusive, copy=copy, dtype=dtype - ) + arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype) return cls._simple_new(arr, name=name) # -------------------------------------------------------------------- @@ -360,7 +327,7 @@ def from_tuples( def _engine(self) -> IntervalTree: # type: ignore[override] left = self._maybe_convert_i8(self.left) right = self._maybe_convert_i8(self.right) - return IntervalTree(left, right, inclusive=self.inclusive) + return IntervalTree(left, right, closed=self.closed) def __contains__(self, key: Any) -> bool: """ @@ -395,7 +362,7 @@ def __reduce__(self): d = { "left": self.left, "right": self.right, - "inclusive": self.inclusive, + "closed": self.closed, "name": self.name, } return _new_IntervalIndex, (type(self), d), None @@ -450,7 +417,7 @@ def is_overlapping(self) -> bool: """ Return True if the IntervalIndex has overlapping intervals, else False. - Two intervals overlap if they share a common point, including inclusive + Two intervals overlap if they share a common point, including closed endpoints. Intervals that only have an open endpoint in common do not overlap. 
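A minimal doctest-style sketch of the keyword this hunk restores; the calls use only names present in this diff (``interval_range``, ``closed``, ``IntervalDtype``), and the outputs are inferred from the repr/``__str__`` conventions shown elsewhere in the diff, so treat them as illustrative rather than authoritative:

>>> import pandas as pd
>>> idx = pd.interval_range(start=0, end=3, closed="left")
>>> idx
IntervalIndex([[0, 1), [1, 2), [2, 3)], dtype='interval[int64, left]')
>>> idx.closed  # attribute re-exposed via inherit_names above
'left'
>>> pd.IntervalDtype("int64", closed="both")
interval[int64, both]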
@@ -467,16 +434,16 @@ def is_overlapping(self) -> bool: Examples -------- - >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)], "right") + >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)]) >>> index IntervalIndex([(0, 2], (1, 3], (4, 5]], dtype='interval[int64, right]') >>> index.is_overlapping True - Intervals that share inclusive endpoints overlap: + Intervals that share closed endpoints overlap: - >>> index = pd.interval_range(0, 3, inclusive='both') + >>> index = pd.interval_range(0, 3, closed='both') >>> index IntervalIndex([[0, 1], [1, 2], [2, 3]], dtype='interval[int64, both]') @@ -485,7 +452,7 @@ def is_overlapping(self) -> bool: Intervals that only have an open endpoint in common do not overlap: - >>> index = pd.interval_range(0, 3, inclusive='left') + >>> index = pd.interval_range(0, 3, closed='left') >>> index IntervalIndex([[0, 1), [1, 2), [2, 3)], dtype='interval[int64, left]') @@ -551,7 +518,7 @@ def _maybe_convert_i8(self, key): constructor = Interval if scalar else IntervalIndex.from_arrays # error: "object" not callable return constructor( - left, right, inclusive=self.inclusive + left, right, closed=self.closed ) # type: ignore[operator] if scalar: @@ -632,7 +599,7 @@ def get_loc( Examples -------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) - >>> index = pd.IntervalIndex([i1, i2], "right") + >>> index = pd.IntervalIndex([i1, i2]) >>> index.get_loc(1) 0 @@ -645,20 +612,20 @@ def get_loc( relevant intervals. >>> i3 = pd.Interval(0, 2) - >>> overlapping_index = pd.IntervalIndex([i1, i2, i3], "right") + >>> overlapping_index = pd.IntervalIndex([i1, i2, i3]) >>> overlapping_index.get_loc(0.5) array([ True, False, True]) Only exact matches will be returned if an interval is provided. - >>> index.get_loc(pd.Interval(0, 1, "right")) + >>> index.get_loc(pd.Interval(0, 1)) 0 """ self._check_indexing_method(method) self._check_indexing_error(key) if isinstance(key, Interval): - if self.inclusive != key.inclusive: + if self.closed != key.closed: raise KeyError(key) mask = (self.left == key.left) & (self.right == key.right) elif is_valid_na_for_dtype(key, self.dtype): @@ -719,7 +686,7 @@ def get_indexer_non_unique( target = ensure_index(target) if not self._should_compare(target) and not self._should_partial_index(target): - # e.g. IntervalIndex with different inclusive or incompatible subtype + # e.g. IntervalIndex with different closed or incompatible subtype # -> no matches return self._get_indexer_non_comparable(target, None, unique=False) @@ -871,7 +838,7 @@ def _intersection(self, other, sort): """ intersection specialized to the case with matching dtypes. """ - # For IntervalIndex we also know other.inclusive == self.inclusive + # For IntervalIndex we also know other.closed == self.closed if self.left.is_unique and self.right.is_unique: taken = self._intersection_unique(other) elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1: @@ -983,14 +950,13 @@ def _is_type_compatible(a, b) -> bool: ) -@deprecate_kwarg(old_arg_name="closed", new_arg_name="inclusive") def interval_range( start=None, end=None, periods=None, freq=None, name: Hashable = None, - inclusive: IntervalInclusiveType | None = None, + closed: IntervalClosedType = "right", ) -> IntervalIndex: """ Return a fixed frequency IntervalIndex. @@ -1009,25 +975,17 @@ def interval_range( for numeric and 'D' for datetime-like. name : str, default None Name of the resulting IntervalIndex. 
- inclusive : {"both", "neither", "left", "right"}, default "both" - Include boundaries; Whether to set each bound as inclusive or not. - - .. versionadded:: 1.5.0 closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. - .. deprecated:: 1.5.0 - Argument `closed` has been deprecated to standardize boundary inputs. - Use `inclusive` instead, to set each bound as closed or open. - Returns ------- IntervalIndex See Also -------- - IntervalIndex : An Index of intervals that are all inclusive on the same side. + IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- @@ -1043,14 +1001,14 @@ def interval_range( -------- Numeric ``start`` and ``end`` is supported. - >>> pd.interval_range(start=0, end=5, inclusive="right") + >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], dtype='interval[int64, right]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), - ... end=pd.Timestamp('2017-01-04'), inclusive="right") + ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], dtype='interval[datetime64[ns], right]') @@ -1059,7 +1017,7 @@ def interval_range( endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. - >>> pd.interval_range(start=0, periods=4, freq=1.5, inclusive="right") + >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') @@ -1067,7 +1025,7 @@ def interval_range( convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), - ... periods=3, freq='MS', inclusive="right") + ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], dtype='interval[datetime64[ns], right]') @@ -1075,20 +1033,17 @@ def interval_range( Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). - >>> pd.interval_range(start=0, end=6, periods=4, inclusive="right") + >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], dtype='interval[float64, right]') - The ``inclusive`` parameter specifies which endpoints of the individual - intervals within the ``IntervalIndex`` are inclusive. + The ``closed`` parameter specifies which endpoints of the individual + intervals within the ``IntervalIndex`` are closed. 
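For instance, with ``closed="neither"`` both endpoints are open (a sketch assuming the restored signature above; with ``end=5, periods=4`` the breaks are 1 through 5, as in the example that follows):

>>> pd.interval_range(end=5, periods=4, closed="neither")
IntervalIndex([(1, 2), (2, 3), (3, 4), (4, 5)], dtype='interval[int64, neither]')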
- >>> pd.interval_range(end=5, periods=4, inclusive='both') + >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], dtype='interval[int64, both]') """ - if inclusive is None: - inclusive = "right" - start = maybe_box_datetimelike(start) end = maybe_box_datetimelike(end) endpoint = start if start is not None else end @@ -1161,4 +1116,4 @@ def interval_range( else: breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) - return IntervalIndex.from_breaks(breaks, name=name, inclusive=inclusive) + return IntervalIndex.from_breaks(breaks, name=name, closed=closed) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3e27cf0b15511..69089cc64e671 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1978,8 +1978,8 @@ def _catch_deprecated_value_error(err: Exception) -> None: # is enforced, stop catching ValueError here altogether if isinstance(err, IncompatibleFrequency): pass - elif "'value.inclusive' is" in str(err): - # IntervalDtype mismatched 'inclusive' + elif "'value.closed' is" in str(err): + # IntervalDtype mismatched 'closed' pass elif "Timezones don't match" not in str(err): raise diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 00b2b30eb3122..94705790e40bd 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -231,7 +231,7 @@ def cut( is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. - >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], inclusive="right") + >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]] @@ -561,7 +561,7 @@ def _format_labels( bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None ): """based on the dtype, return our labels""" - inclusive: IntervalLeftRight = "right" if right else "left" + closed: IntervalLeftRight = "right" if right else "left" formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] @@ -584,7 +584,7 @@ def _format_labels( # adjust lhs of first interval by precision to account for being right closed breaks[0] = adjust(breaks[0]) - return IntervalIndex.from_breaks(breaks, inclusive=inclusive) + return IntervalIndex.from_breaks(breaks, closed=closed) def _preprocess_for_cut(x): diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 0522e113d6525..8444efb7cb636 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -26,7 +26,6 @@ Axis, FilePath, IndexLabel, - IntervalInclusiveType, Level, QuantileInterpolation, Scalar, @@ -3489,7 +3488,7 @@ def highlight_between( axis: Axis | None = 0, left: Scalar | Sequence | None = None, right: Scalar | Sequence | None = None, - inclusive: IntervalInclusiveType = "both", + inclusive: str = "both", props: str | None = None, ) -> Styler: """ @@ -3594,7 +3593,7 @@ def highlight_quantile( q_left: float = 0.0, q_right: float = 1.0, interpolation: QuantileInterpolation = "linear", - inclusive: IntervalInclusiveType = "both", + inclusive: str = "both", props: str | None = None, ) -> Styler: """ @@ -3979,7 +3978,7 @@ def _highlight_between( props: str, left: Scalar | Sequence | np.ndarray | NDFrame | None = None, right: Scalar | Sequence | np.ndarray | NDFrame | None = None, - inclusive: bool | IntervalInclusiveType = True, + inclusive: 
bool | str = True, ) -> np.ndarray: """ Return an array of css props based on condition of data values within given range. diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 99e1ad1767e07..88e3dca62d9e0 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -62,16 +62,16 @@ def interval_array(left_right_dtypes): return IntervalArray.from_arrays(left, right) -def create_categorical_intervals(left, right, inclusive="right"): - return Categorical(IntervalIndex.from_arrays(left, right, inclusive)) +def create_categorical_intervals(left, right, closed="right"): + return Categorical(IntervalIndex.from_arrays(left, right, closed)) -def create_series_intervals(left, right, inclusive="right"): - return Series(IntervalArray.from_arrays(left, right, inclusive)) +def create_series_intervals(left, right, closed="right"): + return Series(IntervalArray.from_arrays(left, right, closed)) -def create_series_categorical_intervals(left, right, inclusive="right"): - return Series(Categorical(IntervalIndex.from_arrays(left, right, inclusive))) +def create_series_categorical_intervals(left, right, closed="right"): + return Series(Categorical(IntervalIndex.from_arrays(left, right, closed))) class TestComparison: @@ -126,10 +126,8 @@ def test_compare_scalar_interval(self, op, interval_array): tm.assert_numpy_array_equal(result, expected) def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed): - interval_array = IntervalArray.from_arrays( - range(2), range(1, 3), inclusive=closed - ) - other = Interval(0, 1, inclusive=other_closed) + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = Interval(0, 1, closed=other_closed) result = op(interval_array, other) expected = self.elementwise_comparison(op, interval_array, other) @@ -209,10 +207,8 @@ def test_compare_list_like_interval(self, op, interval_array, interval_construct def test_compare_list_like_interval_mixed_closed( self, op, interval_constructor, closed, other_closed ): - interval_array = IntervalArray.from_arrays( - range(2), range(1, 3), inclusive=closed - ) - other = interval_constructor(range(2), range(1, 3), inclusive=other_closed) + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = interval_constructor(range(2), range(1, 3), closed=other_closed) result = op(interval_array, other) expected = self.elementwise_comparison(op, interval_array, other) diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index 48f5c676b66e6..2a6bea3255342 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -55,31 +55,31 @@ def test_is_empty(self, constructor, left, right, closed): # GH27219 tuples = [(left, left), (left, right), np.nan] expected = np.array([closed != "both", False, False]) - result = constructor.from_tuples(tuples, inclusive=closed).is_empty + result = constructor.from_tuples(tuples, closed=closed).is_empty tm.assert_numpy_array_equal(result, expected) class TestMethods: - @pytest.mark.parametrize("new_inclusive", ["left", "right", "both", "neither"]) - def test_set_inclusive(self, closed, new_inclusive): + @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) + def test_set_closed(self, closed, new_closed): # GH 21670 - array = IntervalArray.from_breaks(range(10), inclusive=closed) - result = array.set_inclusive(new_inclusive) 
- expected = IntervalArray.from_breaks(range(10), inclusive=new_inclusive) + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize( "other", [ - Interval(0, 1, inclusive="right"), - IntervalArray.from_breaks([1, 2, 3, 4], inclusive="right"), + Interval(0, 1, closed="right"), + IntervalArray.from_breaks([1, 2, 3, 4], closed="right"), ], ) def test_where_raises(self, other): # GH#45768 The IntervalArray methods raises; the Series method coerces - ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], inclusive="left")) + ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left")) mask = np.array([True, False, True]) - match = "'value.inclusive' is 'right', expected 'left'." + match = "'value.closed' is 'right', expected 'left'." with pytest.raises(ValueError, match=match): ser.array._where(mask, other) @@ -89,15 +89,15 @@ def test_where_raises(self, other): def test_shift(self): # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502 - a = IntervalArray.from_breaks([1, 2, 3], "right") + a = IntervalArray.from_breaks([1, 2, 3]) result = a.shift() # int -> float - expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)], "right") + expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)]) tm.assert_interval_array_equal(result, expected) def test_shift_datetime(self): # GH#31502, GH#31504 - a = IntervalArray.from_breaks(date_range("2000", periods=4), "right") + a = IntervalArray.from_breaks(date_range("2000", periods=4)) result = a.shift(2) expected = a.take([-1, -1, 0], allow_fill=True) tm.assert_interval_array_equal(result, expected) @@ -134,12 +134,12 @@ def test_set_na(self, left_right_dtypes): tm.assert_extension_array_equal(result, expected) - def test_setitem_mismatched_inclusive(self): - arr = IntervalArray.from_breaks(range(4), "right") + def test_setitem_mismatched_closed(self): + arr = IntervalArray.from_breaks(range(4)) orig = arr.copy() - other = arr.set_inclusive("both") + other = arr.set_closed("both") - msg = "'value.inclusive' is 'both', expected 'right'" + msg = "'value.closed' is 'both', expected 'right'" with pytest.raises(ValueError, match=msg): arr[0] = other[0] with pytest.raises(ValueError, match=msg): @@ -156,13 +156,13 @@ def test_setitem_mismatched_inclusive(self): arr[:] = other[::-1].astype("category") # empty list should be no-op - arr[:0] = IntervalArray.from_breaks([], "right") + arr[:0] = [] tm.assert_interval_array_equal(arr, orig) def test_repr(): # GH 25022 - arr = IntervalArray.from_tuples([(0, 1), (1, 2)], "right") + arr = IntervalArray.from_tuples([(0, 1), (1, 2)]) result = repr(arr) expected = ( "<IntervalArray>\n" @@ -254,7 +254,7 @@ def test_arrow_extension_type(): p2 = ArrowIntervalType(pa.int64(), "left") p3 = ArrowIntervalType(pa.int64(), "right") - assert p1.inclusive == "left" + assert p1.closed == "left" assert p1 == p2 assert not p1 == p3 assert hash(p1) == hash(p2) @@ -271,7 +271,7 @@ def test_arrow_array(): result = pa.array(intervals) assert isinstance(result.type, ArrowIntervalType) - assert result.type.inclusive == intervals.inclusive + assert result.type.closed == intervals.closed assert result.type.subtype == pa.int64() assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64")) assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64")) @@ -302,7 +302,7 @@ def 
test_arrow_array_missing(): result = pa.array(arr) assert isinstance(result.type, ArrowIntervalType) - assert result.type.inclusive == arr.inclusive + assert result.type.closed == arr.closed assert result.type.subtype == pa.float64() # fields have missing values (not NaN) @@ -386,11 +386,11 @@ def test_from_arrow_from_raw_struct_array(): import pyarrow as pa arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}]) - dtype = pd.IntervalDtype(np.dtype("int64"), inclusive="neither") + dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither") result = dtype.__from_arrow__(arr) expected = IntervalArray.from_breaks( - np.array([0, 1, 2], dtype="int64"), inclusive="neither" + np.array([0, 1, 2], dtype="int64"), closed="neither" ) tm.assert_extension_array_equal(result, expected) @@ -398,51 +398,6 @@ def test_from_arrow_from_raw_struct_array(): tm.assert_extension_array_equal(result, expected) -def test_interval_error_and_warning(): - # GH 40245 - msg = ( - "Deprecated argument `closed` cannot " - "be passed if argument `inclusive` is not None" - ) - with pytest.raises(ValueError, match=msg): - Interval(0, 1, closed="both", inclusive="both") - - msg = "Argument `closed` is deprecated in favor of `inclusive`" - with tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False): - Interval(0, 1, closed="both") - - -def test_interval_array_error_and_warning(): - # GH 40245 - msg = "Can only specify 'closed' or 'inclusive', not both." - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning): - IntervalArray( - [Interval(0, 1), Interval(1, 5)], closed="both", inclusive="both" - ) - - msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead." - with tm.assert_produces_warning(FutureWarning, match=msg): - IntervalArray([Interval(0, 1), Interval(1, 5)], closed="both") - - -@pyarrow_skip -def test_arrow_interval_type_error_and_warning(): - # GH 40245 - import pyarrow as pa - - from pandas.core.arrays.arrow.extension_types import ArrowIntervalType - - msg = "Can only specify 'closed' or 'inclusive', not both." - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning): - ArrowIntervalType(pa.int64(), closed="both", inclusive="both") - - msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead." - with tm.assert_produces_warning(FutureWarning, match=msg): - ArrowIntervalType(pa.int64(), closed="both") - - @pytest.mark.parametrize("timezone", ["UTC", "US/Pacific", "GMT"]) def test_interval_index_subtype(timezone, inclusive_endpoints_fixture): # GH 46999 @@ -451,45 +406,10 @@ def test_interval_index_subtype(timezone, inclusive_endpoints_fixture): result = IntervalIndex.from_arrays( ["2022-01-01", "2022-01-02"], ["2022-01-02", "2022-01-03"], - inclusive=inclusive_endpoints_fixture, + closed=inclusive_endpoints_fixture, dtype=dtype, ) expected = IntervalIndex.from_arrays( - dates[:-1], dates[1:], inclusive=inclusive_endpoints_fixture + dates[:-1], dates[1:], closed=inclusive_endpoints_fixture ) tm.assert_index_equal(result, expected) - - -def test_from_tuples_deprecation(): - # GH#40245 - with tm.assert_produces_warning(FutureWarning): - IntervalArray.from_tuples([(0, 1), (1, 2)], closed="right") - - -def test_from_tuples_deprecation_error(): - # GH#40245 - msg = "Can only specify 'closed' or 'inclusive', not both." 
- with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning): - IntervalArray.from_tuples( - [(0, 1), (1, 2)], closed="right", inclusive="right" - ) - - -def test_from_breaks_deprecation(): - # GH#40245 - with tm.assert_produces_warning(FutureWarning): - IntervalArray.from_breaks([0, 1, 2, 3], closed="right") - - -def test_from_arrays_deprecation(): - # GH#40245 - with tm.assert_produces_warning(FutureWarning): - IntervalArray.from_arrays([0, 1, 2], [1, 2, 3], closed="right") - - -def test_set_closed_deprecated(): - # GH#40245 - array = IntervalArray.from_breaks(range(10)) - with tm.assert_produces_warning(FutureWarning): - array.set_closed(closed="both") diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 79e73fec706f1..9f8c277f07891 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -133,9 +133,9 @@ ), # Interval ( - [pd.Interval(1, 2, "right"), pd.Interval(3, 4, "right")], + [pd.Interval(1, 2), pd.Interval(3, 4)], "interval", - IntervalArray.from_tuples([(1, 2), (3, 4)], "right"), + IntervalArray.from_tuples([(1, 2), (3, 4)]), ), # Sparse ([0, 1], "Sparse[int64]", SparseArray([0, 1], dtype="int64")), @@ -206,10 +206,7 @@ def test_array_copy(): period_array(["2000", "2001"], freq="D"), ), # interval - ( - [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right")], - IntervalArray.from_breaks([0, 1, 2], "right"), - ), + ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])), # datetime ( [pd.Timestamp("2000"), pd.Timestamp("2001")], @@ -298,8 +295,8 @@ def test_array_inference(data, expected): [ # mix of frequencies [pd.Period("2000", "D"), pd.Period("2001", "A")], - # mix of inclusive - [pd.Interval(0, 1, "left"), pd.Interval(1, 2, "right")], + # mix of closed + [pd.Interval(0, 1, closed="left"), pd.Interval(1, 2, closed="right")], # Mix of timezones [pd.Timestamp("2000", tz="CET"), pd.Timestamp("2000", tz="UTC")], # Mix of tz-aware and tz-naive diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 3adaddf89cf30..599aaae4d3527 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -290,10 +290,8 @@ def test_array_multiindex_raises(): ), (pd.array([0, np.nan], dtype="Int64"), np.array([0, pd.NA], dtype=object)), ( - IntervalArray.from_breaks([0, 1, 2], "right"), - np.array( - [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right")], dtype=object - ), + IntervalArray.from_breaks([0, 1, 2]), + np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object), ), (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)), # tz-naive datetime diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index 55a6cc48ebfc8..c46f1b036dbee 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -133,10 +133,10 @@ def test_value_counts_bins(index_or_series): s1 = Series([1, 1, 2, 3]) res1 = s1.value_counts(bins=1) - exp1 = Series({Interval(0.997, 3.0, "right"): 4}) + exp1 = Series({Interval(0.997, 3.0): 4}) tm.assert_series_equal(res1, exp1) res1n = s1.value_counts(bins=1, normalize=True) - exp1n = Series({Interval(0.997, 3.0, "right"): 1.0}) + exp1n = Series({Interval(0.997, 3.0): 1.0}) tm.assert_series_equal(res1n, exp1n) if isinstance(s1, Index): @@ -149,12 +149,12 @@ def test_value_counts_bins(index_or_series): # these return the same res4 = s1.value_counts(bins=4, dropna=True) - intervals = 
IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0], "right") + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4, exp4) res4 = s1.value_counts(bins=4, dropna=False) - intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0], "right") + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2])) tm.assert_series_equal(res4, exp4) diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 92b99ba6d1fe2..984655c68d56b 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -269,7 +269,7 @@ def test_is_interval_dtype(): assert com.is_interval_dtype(IntervalDtype()) - interval = pd.Interval(1, 2, inclusive="right") + interval = pd.Interval(1, 2, closed="right") assert not com.is_interval_dtype(interval) assert com.is_interval_dtype(pd.IntervalIndex([interval])) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 64849c4223486..aeae5fec481ec 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -577,30 +577,21 @@ def test_hash_vs_equality(self, dtype): "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")] ) def test_construction(self, subtype): - i = IntervalDtype(subtype, inclusive="right") + i = IntervalDtype(subtype, closed="right") assert i.subtype == np.dtype("int64") assert is_interval_dtype(i) - @pytest.mark.parametrize( - "subtype", ["interval[int64, right]", "Interval[int64, right]"] - ) - def test_construction_string_regex(self, subtype): - i = IntervalDtype(subtype=subtype) - assert i.subtype == np.dtype("int64") - assert i.inclusive == "right" - assert is_interval_dtype(i) - @pytest.mark.parametrize( "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")] ) - def test_construction_allows_inclusive_none(self, subtype): + def test_construction_allows_closed_none(self, subtype): # GH#38394 dtype = IntervalDtype(subtype) - assert dtype.inclusive is None + assert dtype.closed is None - def test_inclusive_mismatch(self): - msg = "'inclusive' keyword does not match value specified in dtype string" + def test_closed_mismatch(self): + msg = "'closed' keyword does not match value specified in dtype string" with pytest.raises(ValueError, match=msg): IntervalDtype("interval[int64, left]", "right") @@ -638,16 +629,16 @@ def test_construction_errors(self, subtype): with pytest.raises(TypeError, match=msg): IntervalDtype(subtype) - def test_inclusive_must_match(self): + def test_closed_must_match(self): # GH#37933 dtype = IntervalDtype(np.float64, "left") - msg = "dtype.inclusive and 'inclusive' do not match" + msg = "dtype.closed and 'closed' do not match" with pytest.raises(ValueError, match=msg): - IntervalDtype(dtype, inclusive="both") + IntervalDtype(dtype, closed="both") - def test_inclusive_invalid(self): - with pytest.raises(ValueError, match="inclusive must be one of"): + def test_closed_invalid(self): + with pytest.raises(ValueError, match="closed must be one of"): IntervalDtype(np.float64, "foo") def test_construction_from_string(self, dtype): @@ -747,8 +738,8 @@ def test_equality(self, dtype): ) def test_equality_generic(self, subtype): # GH 18980 - inclusive = "right" if subtype is not None else None - dtype = IntervalDtype(subtype, inclusive=inclusive) + closed = "right" if subtype is not None else None + 
dtype = IntervalDtype(subtype, closed=closed) assert is_dtype_equal(dtype, "interval") assert is_dtype_equal(dtype, IntervalDtype()) @@ -766,9 +757,9 @@ def test_equality_generic(self, subtype): ) def test_name_repr(self, subtype): # GH 18980 - inclusive = "right" if subtype is not None else None - dtype = IntervalDtype(subtype, inclusive=inclusive) - expected = f"interval[{subtype}, {inclusive}]" + closed = "right" if subtype is not None else None + dtype = IntervalDtype(subtype, closed=closed) + expected = f"interval[{subtype}, {closed}]" assert str(dtype) == expected assert dtype.name == "interval" @@ -822,29 +813,14 @@ def test_not_string(self): # GH30568: though IntervalDtype has object kind, it cannot be string assert not is_string_dtype(IntervalDtype()) - def test_unpickling_without_inclusive(self): + def test_unpickling_without_closed(self): # GH#38394 dtype = IntervalDtype("interval") - assert dtype._inclusive is None + assert dtype._closed is None tm.round_trip_pickle(dtype) - def test_interval_dtype_error_and_warning(self): - # GH 40245 - msg = ( - "Deprecated argument `closed` cannot " - "be passed if argument `inclusive` is not None" - ) - with pytest.raises(ValueError, match=msg): - IntervalDtype("int64", closed="right", inclusive="right") - - msg = "Argument `closed` is deprecated in favor of `inclusive`" - with tm.assert_produces_warning( - FutureWarning, match=msg, check_stacklevel=False - ): - IntervalDtype("int64", closed="right") - class TestCategoricalDtypeParametrized: @pytest.mark.parametrize( diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 14f37bca71f82..f08d6b8c9feb8 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -966,7 +966,7 @@ def test_mixed_dtypes_remain_object_array(self): @pytest.mark.parametrize( "idx", [ - pd.IntervalIndex.from_breaks(range(5), inclusive="both"), + pd.IntervalIndex.from_breaks(range(5), closed="both"), pd.period_range("2016-01-01", periods=3, freq="D"), ], ) @@ -1659,7 +1659,7 @@ def test_categorical(self): @pytest.mark.parametrize("asobject", [True, False]) def test_interval(self, asobject): - idx = pd.IntervalIndex.from_breaks(range(5), inclusive="both") + idx = pd.IntervalIndex.from_breaks(range(5), closed="both") if asobject: idx = idx.astype(object) @@ -1675,21 +1675,21 @@ def test_interval(self, asobject): @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0]) def test_interval_mismatched_closed(self, value): - first = Interval(value, value, inclusive="left") - second = Interval(value, value, inclusive="right") + first = Interval(value, value, closed="left") + second = Interval(value, value, closed="right") - # if inclusive match, we should infer "interval" + # if closed match, we should infer "interval" arr = np.array([first, first], dtype=object) assert lib.infer_dtype(arr, skipna=False) == "interval" - # if inclusive dont match, we should _not_ get "interval" + # if closed dont match, we should _not_ get "interval" arr2 = np.array([first, second], dtype=object) assert lib.infer_dtype(arr2, skipna=False) == "mixed" def test_interval_mismatched_subtype(self): - first = Interval(0, 1, inclusive="left") - second = Interval(Timestamp(0), Timestamp(1), inclusive="left") - third = Interval(Timedelta(0), Timedelta(1), inclusive="left") + first = Interval(0, 1, closed="left") + second = Interval(Timestamp(0), Timestamp(1), closed="left") + third = Interval(Timedelta(0), Timedelta(1), closed="left") arr = np.array([first, 
second]) assert lib.infer_dtype(arr, skipna=False) == "mixed" @@ -1701,7 +1701,7 @@ def test_interval_mismatched_subtype(self): assert lib.infer_dtype(arr, skipna=False) == "mixed" # float vs int subdtype are compatible - flt_interval = Interval(1.5, 2.5, inclusive="left") + flt_interval = Interval(1.5, 2.5, closed="left") arr = np.array([first, flt_interval], dtype=object) assert lib.infer_dtype(arr, skipna=False) == "interval" diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index 04fa3c11a6c40..bb948f2281c64 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -10,7 +10,6 @@ import pandas as pd import pandas._testing as tm -from pandas.core.arrays import IntervalArray from pandas.tests.extension.base.base import BaseExtensionTests @@ -77,17 +76,10 @@ def test_setitem_sequence_mismatched_length_raises(self, data, as_array): self.assert_series_equal(ser, original) def test_setitem_empty_indexer(self, data, box_in_series): - data_dtype = type(data) - if box_in_series: data = pd.Series(data) original = data.copy() - - if data_dtype == IntervalArray: - data[np.array([], dtype=int)] = IntervalArray([], "right") - else: - data[np.array([], dtype=int)] = [] - + data[np.array([], dtype=int)] = [] self.assert_equal(data, original) def test_setitem_sequence_broadcasts(self, data, box_in_series): diff --git a/pandas/tests/extension/test_interval.py b/pandas/tests/extension/test_interval.py index eb307d964d736..0f916cea9d518 100644 --- a/pandas/tests/extension/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -30,9 +30,7 @@ def make_data(): N = 100 left_array = np.random.uniform(size=N).cumsum() right_array = left_array + np.random.uniform(size=N) - return [ - Interval(left, right, "right") for left, right in zip(left_array, right_array) - ] + return [Interval(left, right) for left, right in zip(left_array, right_array)] @pytest.fixture @@ -43,7 +41,7 @@ def dtype(): @pytest.fixture def data(): """Length-100 PeriodArray for semantics test.""" - return IntervalArray(make_data(), "right") + return IntervalArray(make_data()) @pytest.fixture diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index 715f69cc03828..c6d54e28ca1c8 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -229,11 +229,7 @@ def test_from_records_series_list_dict(self): def test_from_records_series_categorical_index(self): # GH#32805 index = CategoricalIndex( - [ - Interval(-20, -10, "right"), - Interval(-10, 0, "right"), - Interval(0, 10, "right"), - ] + [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)] ) series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) frame = DataFrame.from_records(series_of_dicts, index=index) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 6b19738becc8e..53fcfe334b770 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -236,10 +236,7 @@ def test_setitem_dict_preserves_dtypes(self): "obj,dtype", [ (Period("2020-01"), PeriodDtype("M")), - ( - Interval(left=0, right=5, inclusive="right"), - IntervalDtype("int64", "right"), - ), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), diff --git 
a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py index 6bfe07feb010d..c71b688d390d4 100644 --- a/pandas/tests/frame/methods/test_combine_first.py +++ b/pandas/tests/frame/methods/test_combine_first.py @@ -402,7 +402,7 @@ def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): (datetime(2020, 1, 1), datetime(2020, 1, 2)), (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")), (pd.Timedelta("89 days"), pd.Timedelta("60 min")), - (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, inclusive="left")), + (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")), ], ) def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture): diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index bd168e4f14558..37431bc291b76 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -751,7 +751,7 @@ def test_reset_index_interval_columns_object_cast(): result = df.reset_index() expected = DataFrame( [[1, 1.0, 0.0], [2, 0.0, 1.0]], - columns=Index(["Year", Interval(0, 1, "right"), Interval(1, 2, "right")]), + columns=Index(["Year", Interval(0, 1), Interval(1, 2)]), ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_round.py b/pandas/tests/frame/methods/test_round.py index 77cadfff55e2f..dd9206940bcd6 100644 --- a/pandas/tests/frame/methods/test_round.py +++ b/pandas/tests/frame/methods/test_round.py @@ -210,7 +210,7 @@ def test_round_nonunique_categorical(self): def test_round_interval_category_columns(self): # GH#30063 - columns = pd.CategoricalIndex(pd.interval_range(0, 2, inclusive="right")) + columns = pd.CategoricalIndex(pd.interval_range(0, 2)) df = DataFrame([[0.66, 1.1], [0.3, 0.25]], columns=columns) result = df.round() diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 9cad965e9cb5c..5d1cc3d4ecee5 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -384,7 +384,7 @@ def test_sort_index_intervalindex(self): result = model.groupby(["X1", "X2"], observed=True).mean().unstack() expected = IntervalIndex.from_tuples( - [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], inclusive="right" + [(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right" ) result = result.columns.levels[1].categories tm.assert_index_equal(result, expected) @@ -729,11 +729,7 @@ def test_sort_index_multilevel_repr_8017(self, gen, extra): [ pytest.param(["a", "b", "c"], id="str"), pytest.param( - [ - pd.Interval(0, 1, "right"), - pd.Interval(1, 2, "right"), - pd.Interval(2, 3, "right"), - ], + [pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)], id="pd.Interval", ), ], diff --git a/pandas/tests/frame/methods/test_to_csv.py b/pandas/tests/frame/methods/test_to_csv.py index df7bc04202e39..1933278efb443 100644 --- a/pandas/tests/frame/methods/test_to_csv.py +++ b/pandas/tests/frame/methods/test_to_csv.py @@ -1316,7 +1316,7 @@ def test_to_csv_categorical_and_interval(self): pd.Interval( Timestamp("2020-01-01"), Timestamp("2020-01-02"), - inclusive="both", + closed="both", ) ] } diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 4c2e9b8530e81..7d3af7dfa9a42 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -902,10 +902,7 @@ def 
test_constructor_dict_extension_scalar(self, ea_scalar_and_dtype): "data,dtype", [ (Period("2020-01"), PeriodDtype("M")), - ( - Interval(left=0, right=5, inclusive="right"), - IntervalDtype("int64", "right"), - ), + (Interval(left=0, right=5), IntervalDtype("int64", "right")), ( Timestamp("2011-01-01", tz="US/Eastern"), DatetimeTZDtype(tz="US/Eastern"), @@ -2431,16 +2428,16 @@ def test_constructor_series_nonexact_categoricalindex(self): result = DataFrame({"1": ser1, "2": ser2}) index = CategoricalIndex( [ - Interval(-0.099, 9.9, inclusive="right"), - Interval(9.9, 19.8, inclusive="right"), - Interval(19.8, 29.7, inclusive="right"), - Interval(29.7, 39.6, inclusive="right"), - Interval(39.6, 49.5, inclusive="right"), - Interval(49.5, 59.4, inclusive="right"), - Interval(59.4, 69.3, inclusive="right"), - Interval(69.3, 79.2, inclusive="right"), - Interval(79.2, 89.1, inclusive="right"), - Interval(89.1, 99, inclusive="right"), + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), ], ordered=True, ) diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 6c5a3ae67c78a..0a3845617b32d 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -216,7 +216,7 @@ def test_cython_agg_empty_buckets_nanops(observed): result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general( "sum", alt=None, numeric_only=True ) - intervals = pd.interval_range(0, 20, freq=5, inclusive="right") + intervals = pd.interval_range(0, 20, freq=5) expected = DataFrame( {"a": [0, 0, 36, 0]}, index=pd.CategoricalIndex(intervals, name="a", ordered=True), diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 6da07dafcda74..728575a80f32f 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -799,13 +799,13 @@ def test_get_group_empty_bins(self, observed): # TODO: should prob allow a str of Interval work as well # IOW '(0, 5]' - result = g.get_group(pd.Interval(0, 5, "right")) + result = g.get_group(pd.Interval(0, 5)) expected = DataFrame([3, 1], index=[0, 1]) tm.assert_frame_equal(result, expected) - msg = r"Interval\(10, 15, inclusive='right'\)" + msg = r"Interval\(10, 15, closed='right'\)" with pytest.raises(KeyError, match=msg): - g.get_group(pd.Interval(10, 15, "right")) + g.get_group(pd.Interval(10, 15)) def test_get_group_grouped_by_tuple(self): # GH 8121 diff --git a/pandas/tests/indexes/categorical/test_astype.py b/pandas/tests/indexes/categorical/test_astype.py index ec3e3dca92808..854ae8b62db30 100644 --- a/pandas/tests/indexes/categorical/test_astype.py +++ b/pandas/tests/indexes/categorical/test_astype.py @@ -26,9 +26,7 @@ def test_astype(self): assert not isinstance(result, CategoricalIndex) # interval - ii = IntervalIndex.from_arrays( - left=[-0.001, 2.0], right=[2, 4], inclusive="right" - ) + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") ci = CategoricalIndex( Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) diff --git a/pandas/tests/indexes/categorical/test_reindex.py 
b/pandas/tests/indexes/categorical/test_reindex.py index 8764063a1a008..1337eff1f1c2f 100644 --- a/pandas/tests/indexes/categorical/test_reindex.py +++ b/pandas/tests/indexes/categorical/test_reindex.py @@ -69,15 +69,15 @@ def test_reindex_empty_index(self): def test_reindex_categorical_added_category(self): # GH 42424 ci = CategoricalIndex( - [Interval(0, 1, inclusive="right"), Interval(1, 2, inclusive="right")], + [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], ordered=True, ) ci_add = CategoricalIndex( [ - Interval(0, 1, inclusive="right"), - Interval(1, 2, inclusive="right"), - Interval(2, 3, inclusive="right"), - Interval(3, 4, inclusive="right"), + Interval(0, 1, closed="right"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="right"), + Interval(3, 4, closed="right"), ], ordered=True, ) diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py index 6751a383699bb..c253a745ef5a2 100644 --- a/pandas/tests/indexes/interval/test_astype.py +++ b/pandas/tests/indexes/interval/test_astype.py @@ -82,7 +82,7 @@ class TestIntSubtype(AstypeTests): indexes = [ IntervalIndex.from_breaks(np.arange(-10, 11, dtype="int64")), - IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), inclusive="left"), + IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), closed="left"), ] @pytest.fixture(params=indexes) @@ -93,12 +93,10 @@ def index(self, request): "subtype", ["float64", "datetime64[ns]", "timedelta64[ns]"] ) def test_subtype_conversion(self, index, subtype): - dtype = IntervalDtype(subtype, index.inclusive) + dtype = IntervalDtype(subtype, index.closed) result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), - index.right.astype(subtype), - inclusive=index.inclusive, + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed ) tm.assert_index_equal(result, expected) @@ -107,19 +105,19 @@ def test_subtype_conversion(self, index, subtype): ) def test_subtype_integer(self, subtype_start, subtype_end): index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start)) - dtype = IntervalDtype(subtype_end, index.inclusive) + dtype = IntervalDtype(subtype_end, index.closed) result = index.astype(dtype) expected = IntervalIndex.from_arrays( index.left.astype(subtype_end), index.right.astype(subtype_end), - inclusive=index.inclusive, + closed=index.closed, ) tm.assert_index_equal(result, expected) @pytest.mark.xfail(reason="GH#15832") def test_subtype_integer_errors(self): # int64 -> uint64 fails with negative values - index = interval_range(-10, 10, inclusive="right") + index = interval_range(-10, 10) dtype = IntervalDtype("uint64", "right") # Until we decide what the exception message _should_ be, we @@ -135,11 +133,9 @@ class TestFloatSubtype(AstypeTests): """Tests specific to IntervalIndex with float subtype""" indexes = [ - interval_range(-10.0, 10.0, inclusive="neither"), + interval_range(-10.0, 10.0, closed="neither"), IntervalIndex.from_arrays( - [-1.5, np.nan, 0.0, 0.0, 1.5], - [-0.5, np.nan, 1.0, 1.0, 3.0], - inclusive="both", + [-1.5, np.nan, 0.0, 0.0, 1.5], [-0.5, np.nan, 1.0, 1.0, 3.0], closed="both" ), ] @@ -153,9 +149,7 @@ def test_subtype_integer(self, subtype): dtype = IntervalDtype(subtype, "right") result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), - index.right.astype(subtype), - inclusive=index.inclusive, + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed ) 
tm.assert_index_equal(result, expected) @@ -170,15 +164,13 @@ def test_subtype_integer_with_non_integer_borders(self, subtype): dtype = IntervalDtype(subtype, "right") result = index.astype(dtype) expected = IntervalIndex.from_arrays( - index.left.astype(subtype), - index.right.astype(subtype), - inclusive=index.inclusive, + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed ) tm.assert_index_equal(result, expected) def test_subtype_integer_errors(self): # float64 -> uint64 fails with negative values - index = interval_range(-10.0, 10.0, inclusive="right") + index = interval_range(-10.0, 10.0) dtype = IntervalDtype("uint64", "right") msg = re.escape( "Cannot convert interval[float64, right] to interval[uint64, right]; " @@ -199,10 +191,10 @@ class TestDatetimelikeSubtype(AstypeTests): """Tests specific to IntervalIndex with datetime-like subtype""" indexes = [ - interval_range(Timestamp("2018-01-01"), periods=10, inclusive="neither"), + interval_range(Timestamp("2018-01-01"), periods=10, closed="neither"), interval_range(Timestamp("2018-01-01"), periods=10).insert(2, NaT), interval_range(Timestamp("2018-01-01", tz="US/Eastern"), periods=10), - interval_range(Timedelta("0 days"), periods=10, inclusive="both"), + interval_range(Timedelta("0 days"), periods=10, closed="both"), interval_range(Timedelta("0 days"), periods=10).insert(2, NaT), ] @@ -224,9 +216,7 @@ def test_subtype_integer(self, index, subtype): new_left = index.left.astype(subtype) new_right = index.right.astype(subtype) - expected = IntervalIndex.from_arrays( - new_left, new_right, inclusive=index.inclusive - ) + expected = IntervalIndex.from_arrays(new_left, new_right, closed=index.closed) tm.assert_index_equal(result, expected) def test_subtype_float(self, index): diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 933707bfe8357..c44303aa2c862 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -16,14 +16,14 @@ class TestBase(Base): @pytest.fixture def simple_index(self) -> IntervalIndex: - return self._index_cls.from_breaks(range(11), inclusive="right") + return self._index_cls.from_breaks(range(11), closed="right") @pytest.fixture def index(self): return tm.makeIntervalIndex(10) - def create_index(self, *, inclusive="right"): - return IntervalIndex.from_breaks(range(11), inclusive=inclusive) + def create_index(self, *, closed="right"): + return IntervalIndex.from_breaks(range(11), closed=closed) def test_repr_max_seq_item_setting(self): # override base test: not a valid repr as we use interval notation @@ -34,13 +34,13 @@ def test_repr_roundtrip(self): pass def test_take(self, closed): - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) result = index.take(range(10)) tm.assert_index_equal(result, index) result = index.take([0, 0, 1]) - expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], inclusive=closed) + expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2], closed=closed) tm.assert_index_equal(result, expected) def test_where(self, simple_index, listlike_box): diff --git a/pandas/tests/indexes/interval/test_constructors.py b/pandas/tests/indexes/interval/test_constructors.py index a23f66d241cd9..a71a8f9e34ea9 100644 --- a/pandas/tests/indexes/interval/test_constructors.py +++ b/pandas/tests/indexes/interval/test_constructors.py @@ -53,24 +53,14 @@ class ConstructorTests: ) def test_constructor(self, constructor, breaks, closed, name): 
result_kwargs = self.get_kwargs_from_breaks(breaks, closed) - result = constructor(inclusive=closed, name=name, **result_kwargs) + result = constructor(closed=closed, name=name, **result_kwargs) - assert result.inclusive == closed + assert result.closed == closed assert result.name == name assert result.dtype.subtype == getattr(breaks, "dtype", "int64") tm.assert_index_equal(result.left, Index(breaks[:-1])) tm.assert_index_equal(result.right, Index(breaks[1:])) - def test_constructor_inclusive_default(self, constructor, name): - result_kwargs = self.get_kwargs_from_breaks([3, 14, 15, 92, 653]) - inclusive_in = result_kwargs.pop("inclusive", None) - result = constructor(name=name, **result_kwargs) - - if inclusive_in is not None: - result_kwargs["inclusive"] = "right" - expected = constructor(name=name, **result_kwargs) - tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( "breaks, subtype", [ @@ -104,8 +94,8 @@ def test_constructor_dtype(self, constructor, breaks, subtype): timedelta_range("1 day", periods=5), ], ) - def test_constructor_pass_inclusive(self, constructor, breaks): - # not passing inclusive to IntervalDtype, but to IntervalArray constructor + def test_constructor_pass_closed(self, constructor, breaks): + # not passing closed to IntervalDtype, but to IntervalArray constructor warn = None if isinstance(constructor, partial) and constructor.func is Index: # passing kwargs to Index is deprecated @@ -118,20 +108,20 @@ def test_constructor_pass_inclusive(self, constructor, breaks): for dtype in (iv_dtype, str(iv_dtype)): with tm.assert_produces_warning(warn): - result = constructor(dtype=dtype, inclusive="left", **result_kwargs) - assert result.dtype.inclusive == "left" + result = constructor(dtype=dtype, closed="left", **result_kwargs) + assert result.dtype.closed == "left" @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize("breaks", [[np.nan] * 2, [np.nan] * 4, [np.nan] * 50]) def test_constructor_nan(self, constructor, breaks, closed): # GH 18421 result_kwargs = self.get_kwargs_from_breaks(breaks) - result = constructor(inclusive=closed, **result_kwargs) + result = constructor(closed=closed, **result_kwargs) expected_subtype = np.float64 expected_values = np.array(breaks[:-1], dtype=object) - assert result.inclusive == closed + assert result.closed == closed assert result.dtype.subtype == expected_subtype tm.assert_numpy_array_equal(np.array(result), expected_values) @@ -149,13 +139,13 @@ def test_constructor_nan(self, constructor, breaks, closed): def test_constructor_empty(self, constructor, breaks, closed): # GH 18421 result_kwargs = self.get_kwargs_from_breaks(breaks) - result = constructor(inclusive=closed, **result_kwargs) + result = constructor(closed=closed, **result_kwargs) expected_values = np.array([], dtype=object) expected_subtype = getattr(breaks, "dtype", np.int64) assert result.empty - assert result.inclusive == closed + assert result.closed == closed assert result.dtype.subtype == expected_subtype tm.assert_numpy_array_equal(np.array(result), expected_values) @@ -193,10 +183,10 @@ def test_generic_errors(self, constructor): # filler input data to be used when supplying invalid kwargs filler = self.get_kwargs_from_breaks(range(10)) - # invalid inclusive - msg = "inclusive must be one of 'right', 'left', 'both', 'neither'" + # invalid closed + msg = "closed must be one of 'right', 'left', 'both', 'neither'" with pytest.raises(ValueError, match=msg): - constructor(inclusive="invalid", **filler) + 
constructor(closed="invalid", **filler) # unsupported dtype msg = "dtype must be an IntervalDtype, got int64" @@ -229,7 +219,7 @@ class TestFromArrays(ConstructorTests): def constructor(self): return IntervalIndex.from_arrays - def get_kwargs_from_breaks(self, breaks, inclusive="right"): + def get_kwargs_from_breaks(self, breaks, closed="right"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_arrays @@ -278,7 +268,7 @@ class TestFromBreaks(ConstructorTests): def constructor(self): return IntervalIndex.from_breaks - def get_kwargs_from_breaks(self, breaks, inclusive="right"): + def get_kwargs_from_breaks(self, breaks, closed="right"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_breaks @@ -316,7 +306,7 @@ class TestFromTuples(ConstructorTests): def constructor(self): return IntervalIndex.from_tuples - def get_kwargs_from_breaks(self, breaks, inclusive="right"): + def get_kwargs_from_breaks(self, breaks, closed="right"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by IntervalIndex.from_tuples @@ -366,7 +356,7 @@ class TestClassConstructors(ConstructorTests): def constructor(self, request): return request.param - def get_kwargs_from_breaks(self, breaks, inclusive="right"): + def get_kwargs_from_breaks(self, breaks, closed="right"): """ converts intervals in breaks format to a dictionary of kwargs to specific to the format expected by the IntervalIndex/Index constructors @@ -375,7 +365,7 @@ def get_kwargs_from_breaks(self, breaks, inclusive="right"): return {"data": breaks} ivs = [ - Interval(left, right, inclusive) if notna(left) else left + Interval(left, right, closed) if notna(left) else left for left, right in zip(breaks[:-1], breaks[1:]) ] @@ -399,9 +389,9 @@ def test_constructor_string(self): pass def test_constructor_errors(self, constructor): - # mismatched inclusive within intervals with no constructor override - ivs = [Interval(0, 1, inclusive="right"), Interval(2, 3, inclusive="left")] - msg = "intervals must all be inclusive on the same side" + # mismatched closed within intervals with no constructor override + ivs = [Interval(0, 1, closed="right"), Interval(2, 3, closed="left")] + msg = "intervals must all be closed on the same side" with pytest.raises(ValueError, match=msg): constructor(ivs) @@ -420,32 +410,29 @@ def test_constructor_errors(self, constructor): @pytest.mark.filterwarnings("ignore:Passing keywords other:FutureWarning") @pytest.mark.parametrize( - "data, inclusive", + "data, closed", [ ([], "both"), ([np.nan, np.nan], "neither"), ( - [ - Interval(0, 3, inclusive="neither"), - Interval(2, 5, inclusive="neither"), - ], + [Interval(0, 3, closed="neither"), Interval(2, 5, closed="neither")], "left", ), ( - [Interval(0, 3, inclusive="left"), Interval(2, 5, inclusive="right")], + [Interval(0, 3, closed="left"), Interval(2, 5, closed="right")], "neither", ), - (IntervalIndex.from_breaks(range(5), inclusive="both"), "right"), + (IntervalIndex.from_breaks(range(5), closed="both"), "right"), ], ) - def test_override_inferred_inclusive(self, constructor, data, inclusive): + def test_override_inferred_closed(self, constructor, data, closed): # GH 19370 if isinstance(data, IntervalIndex): tuples = data.to_tuples() else: tuples = [(iv.left, iv.right) if notna(iv) else iv for iv in data] - expected = IntervalIndex.from_tuples(tuples, inclusive=inclusive) - result = 
constructor(data, inclusive=inclusive) + expected = IntervalIndex.from_tuples(tuples, closed=closed) + result = constructor(data, closed=closed) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -460,27 +447,27 @@ def test_index_object_dtype(self, values_constructor): assert type(result) is Index tm.assert_numpy_array_equal(result.values, np.array(values)) - def test_index_mixed_inclusive(self): + def test_index_mixed_closed(self): # GH27172 intervals = [ - Interval(0, 1, inclusive="left"), - Interval(1, 2, inclusive="right"), - Interval(2, 3, inclusive="neither"), - Interval(3, 4, inclusive="both"), + Interval(0, 1, closed="left"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="neither"), + Interval(3, 4, closed="both"), ] result = Index(intervals) expected = Index(intervals, dtype=object) tm.assert_index_equal(result, expected) -def test_dtype_inclusive_mismatch(): - # GH#38394 +def test_dtype_closed_mismatch(): + # GH#38394 closed specified in both dtype and IntervalIndex constructor dtype = IntervalDtype(np.int64, "left") - msg = "inclusive keyword does not match dtype.inclusive" + msg = "closed keyword does not match dtype.closed" with pytest.raises(ValueError, match=msg): - IntervalIndex([], dtype=dtype, inclusive="neither") + IntervalIndex([], dtype=dtype, closed="neither") with pytest.raises(ValueError, match=msg): - IntervalArray([], dtype=dtype, inclusive="neither") + IntervalArray([], dtype=dtype, closed="neither") diff --git a/pandas/tests/indexes/interval/test_equals.py b/pandas/tests/indexes/interval/test_equals.py index a873116600d6d..87e2348e5fdb3 100644 --- a/pandas/tests/indexes/interval/test_equals.py +++ b/pandas/tests/indexes/interval/test_equals.py @@ -8,7 +8,7 @@ class TestEquals: def test_equals(self, closed): - expected = IntervalIndex.from_breaks(np.arange(5), inclusive=closed) + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) assert expected.equals(expected) assert expected.equals(expected.copy()) @@ -21,16 +21,16 @@ def test_equals(self, closed): assert not expected.equals(date_range("20130101", periods=2)) expected_name1 = IntervalIndex.from_breaks( - np.arange(5), inclusive=closed, name="foo" + np.arange(5), closed=closed, name="foo" ) expected_name2 = IntervalIndex.from_breaks( - np.arange(5), inclusive=closed, name="bar" + np.arange(5), closed=closed, name="bar" ) assert expected.equals(expected_name1) assert expected_name1.equals(expected_name2) - for other_inclusive in {"left", "right", "both", "neither"} - {closed}: - expected_other_inclusive = IntervalIndex.from_breaks( - np.arange(5), inclusive=other_inclusive + for other_closed in {"left", "right", "both", "neither"} - {closed}: + expected_other_closed = IntervalIndex.from_breaks( + np.arange(5), closed=other_closed ) - assert not expected.equals(expected_other_inclusive) + assert not expected.equals(expected_other_closed) diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index 2d9b8c83c7ab2..db477003900bc 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ b/pandas/tests/indexes/interval/test_formats.py @@ -17,8 +17,7 @@ class TestIntervalIndexRendering: def test_frame_repr(self): # https://github.com/pandas-dev/pandas/pull/24134/files df = DataFrame( - {"A": [1, 2, 3, 4]}, - index=IntervalIndex.from_breaks([0, 1, 2, 3, 4], "right"), + {"A": [1, 2, 3, 4]}, index=IntervalIndex.from_breaks([0, 1, 2, 3, 4]) ) result = repr(df) expected = " A\n(0, 1] 1\n(1, 2] 2\n(2, 3] 3\n(3, 4] 4" @@ 
-41,7 +40,7 @@ def test_frame_repr(self): ) def test_repr_missing(self, constructor, expected): # GH 25984 - index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)], "right") + index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)]) obj = constructor(list("abc"), index=index) result = repr(obj) assert result == expected @@ -58,8 +57,7 @@ def test_repr_floats(self): Float64Index([329.973, 345.137], dtype="float64"), Float64Index([345.137, 360.191], dtype="float64"), ) - ], - "right", + ] ), ) result = str(markers) @@ -67,7 +65,7 @@ def test_repr_floats(self): assert result == expected @pytest.mark.parametrize( - "tuples, inclusive, expected_data", + "tuples, closed, expected_data", [ ([(0, 1), (1, 2), (2, 3)], "left", ["[0, 1)", "[1, 2)", "[2, 3)"]), ( @@ -99,9 +97,9 @@ def test_repr_floats(self): ), ], ) - def test_to_native_types(self, tuples, inclusive, expected_data): + def test_to_native_types(self, tuples, closed, expected_data): # GH 28210 - index = IntervalIndex.from_tuples(tuples, inclusive=inclusive) + index = IntervalIndex.from_tuples(tuples, closed=closed) result = index._format_native_types() expected = np.array(expected_data) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py index 74d17b31aff27..9b4afcc9c00b8 100644 --- a/pandas/tests/indexes/interval/test_indexing.py +++ b/pandas/tests/indexes/interval/test_indexing.py @@ -29,23 +29,23 @@ class TestGetLoc: @pytest.mark.parametrize("side", ["right", "left", "both", "neither"]) def test_get_loc_interval(self, closed, side): - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) for bound in [[0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [2.5, 3], [-1, 4]]: # if get_loc is supplied an interval, it should only search # for exact matches, not overlaps or covers, else KeyError. - msg = re.escape(f"Interval({bound[0]}, {bound[1]}, inclusive='{side}')") + msg = re.escape(f"Interval({bound[0]}, {bound[1]}, closed='{side}')") if closed == side: if bound == [0, 1]: - assert idx.get_loc(Interval(0, 1, inclusive=side)) == 0 + assert idx.get_loc(Interval(0, 1, closed=side)) == 0 elif bound == [2, 3]: - assert idx.get_loc(Interval(2, 3, inclusive=side)) == 1 + assert idx.get_loc(Interval(2, 3, closed=side)) == 1 else: with pytest.raises(KeyError, match=msg): - idx.get_loc(Interval(*bound, inclusive=side)) + idx.get_loc(Interval(*bound, closed=side)) else: with pytest.raises(KeyError, match=msg): - idx.get_loc(Interval(*bound, inclusive=side)) + idx.get_loc(Interval(*bound, closed=side)) @pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]) def test_get_loc_scalar(self, closed, scalar): @@ -59,7 +59,7 @@ def test_get_loc_scalar(self, closed, scalar): "neither": {0.5: 0, 2.5: 1}, } - idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) # if get_loc is supplied a scalar, it should return the index of # the interval which contains the scalar, or KeyError. 
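# NOTE (editor's sketch, not part of the patch): the get_loc hunks above
# exercise the semantics restored by renaming `inclusive` back to `closed`:
# a scalar lookup returns the position of the containing interval, while an
# Interval lookup requires an exact match, including the closed side, e.g.:
#
#     import pandas as pd
#
#     idx = pd.IntervalIndex.from_tuples([(0, 1), (2, 3)], closed="left")
#     idx.get_loc(0)                            # -> 0, since 0 is in [0, 1)
#     pd.Interval(0, 1, closed="left") in idx   # -> True, exact match
#     pd.Interval(0, 1, closed="right") in idx  # -> False, closed side differs
#
# Illustrative only, assuming a pandas version that accepts `closed` here.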
@@ -72,7 +72,7 @@ def test_get_loc_scalar(self, closed, scalar): @pytest.mark.parametrize("scalar", [-1, 0, 0.5, 3, 4.5, 5, 6]) def test_get_loc_length_one_scalar(self, scalar, closed): # GH 20921 - index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) if scalar in index[0]: result = index.get_loc(scalar) assert result == 0 @@ -80,21 +80,19 @@ def test_get_loc_length_one_scalar(self, scalar, closed): with pytest.raises(KeyError, match=str(scalar)): index.get_loc(scalar) - @pytest.mark.parametrize("other_inclusive", ["left", "right", "both", "neither"]) + @pytest.mark.parametrize("other_closed", ["left", "right", "both", "neither"]) @pytest.mark.parametrize("left, right", [(0, 5), (-1, 4), (-1, 6), (6, 7)]) - def test_get_loc_length_one_interval(self, left, right, closed, other_inclusive): + def test_get_loc_length_one_interval(self, left, right, closed, other_closed): # GH 20921 - index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) - interval = Interval(left, right, inclusive=other_inclusive) + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) + interval = Interval(left, right, closed=other_closed) if interval == index[0]: result = index.get_loc(interval) assert result == 0 else: with pytest.raises( KeyError, - match=re.escape( - f"Interval({left}, {right}, inclusive='{other_inclusive}')" - ), + match=re.escape(f"Interval({left}, {right}, closed='{other_closed}')"), ): index.get_loc(interval) @@ -198,35 +196,23 @@ class TestGetIndexer: @pytest.mark.parametrize( "query, expected", [ - ([Interval(2, 4, inclusive="right")], [1]), - ([Interval(2, 4, inclusive="left")], [-1]), - ([Interval(2, 4, inclusive="both")], [-1]), - ([Interval(2, 4, inclusive="neither")], [-1]), - ([Interval(1, 4, inclusive="right")], [-1]), - ([Interval(0, 4, inclusive="right")], [-1]), - ([Interval(0.5, 1.5, inclusive="right")], [-1]), - ( - [Interval(2, 4, inclusive="right"), Interval(0, 1, inclusive="right")], - [1, -1], - ), - ( - [Interval(2, 4, inclusive="right"), Interval(2, 4, inclusive="right")], - [1, 1], - ), - ( - [Interval(5, 7, inclusive="right"), Interval(2, 4, inclusive="right")], - [2, 1], - ), - ( - [Interval(2, 4, inclusive="right"), Interval(2, 4, inclusive="left")], - [1, -1], - ), + ([Interval(2, 4, closed="right")], [1]), + ([Interval(2, 4, closed="left")], [-1]), + ([Interval(2, 4, closed="both")], [-1]), + ([Interval(2, 4, closed="neither")], [-1]), + ([Interval(1, 4, closed="right")], [-1]), + ([Interval(0, 4, closed="right")], [-1]), + ([Interval(0.5, 1.5, closed="right")], [-1]), + ([Interval(2, 4, closed="right"), Interval(0, 1, closed="right")], [1, -1]), + ([Interval(2, 4, closed="right"), Interval(2, 4, closed="right")], [1, 1]), + ([Interval(5, 7, closed="right"), Interval(2, 4, closed="right")], [2, 1]), + ([Interval(2, 4, closed="right"), Interval(2, 4, closed="left")], [1, -1]), ], ) def test_get_indexer_with_interval(self, query, expected): tuples = [(0, 2), (2, 4), (5, 7)] - index = IntervalIndex.from_tuples(tuples, inclusive="right") + index = IntervalIndex.from_tuples(tuples, closed="right") result = index.get_indexer(query) expected = np.array(expected, dtype="intp") @@ -255,7 +241,7 @@ def test_get_indexer_with_interval(self, query, expected): def test_get_indexer_with_int_and_float(self, query, expected): tuples = [(0, 1), (1, 2), (3, 4)] - index = IntervalIndex.from_tuples(tuples, inclusive="right") + index = IntervalIndex.from_tuples(tuples, closed="right") result = 
index.get_indexer(query) expected = np.array(expected, dtype="intp") @@ -264,7 +250,7 @@ def test_get_indexer_with_int_and_float(self, query, expected): @pytest.mark.parametrize("item", [[3], np.arange(0.5, 5, 0.5)]) def test_get_indexer_length_one(self, item, closed): # GH 17284 - index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) result = index.get_indexer(item) expected = np.array([0] * len(item), dtype="intp") tm.assert_numpy_array_equal(result, expected) @@ -272,7 +258,7 @@ def test_get_indexer_length_one(self, item, closed): @pytest.mark.parametrize("size", [1, 5]) def test_get_indexer_length_one_interval(self, size, closed): # GH 17284 - index = IntervalIndex.from_tuples([(0, 5)], inclusive=closed) + index = IntervalIndex.from_tuples([(0, 5)], closed=closed) result = index.get_indexer([Interval(0, 5, closed)] * size) expected = np.array([0] * size, dtype="intp") tm.assert_numpy_array_equal(result, expected) @@ -282,14 +268,14 @@ def test_get_indexer_length_one_interval(self, size, closed): [ IntervalIndex.from_tuples([(7, 8), (1, 2), (3, 4), (0, 1)]), IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4), np.nan]), - IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="both"), + IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], closed="both"), [-1, 0, 0.5, 1, 2, 2.5, np.nan], ["foo", "foo", "bar", "baz"], ], ) def test_get_indexer_categorical(self, target, ordered): # GH 30063: categorical and non-categorical results should be consistent - index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="right") + index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) categorical_target = CategoricalIndex(target, ordered=ordered) result = index.get_indexer(categorical_target) @@ -298,7 +284,7 @@ def test_get_indexer_categorical(self, target, ordered): def test_get_indexer_categorical_with_nans(self): # GH#41934 nans in both index and in target - ii = IntervalIndex.from_breaks(range(5), inclusive="right") + ii = IntervalIndex.from_breaks(range(5)) ii2 = ii.append(IntervalIndex([np.nan])) ci2 = CategoricalIndex(ii2) @@ -317,7 +303,7 @@ def test_get_indexer_categorical_with_nans(self): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( - "tuples, inclusive", + "tuples, closed", [ ([(0, 2), (1, 3), (3, 4)], "neither"), ([(0, 5), (1, 4), (6, 7)], "left"), @@ -325,9 +311,9 @@ def test_get_indexer_categorical_with_nans(self): ([(0, 1), (2, 3), (3, 4)], "both"), ], ) - def test_get_indexer_errors(self, tuples, inclusive): + def test_get_indexer_errors(self, tuples, closed): # IntervalIndex needs non-overlapping for uniqueness when querying - index = IntervalIndex.from_tuples(tuples, inclusive=inclusive) + index = IntervalIndex.from_tuples(tuples, closed=closed) msg = ( "cannot handle overlapping indices; use " @@ -359,7 +345,7 @@ def test_get_indexer_errors(self, tuples, inclusive): def test_get_indexer_non_unique_with_int_and_float(self, query, expected): tuples = [(0, 2.5), (1, 3), (2, 4)] - index = IntervalIndex.from_tuples(tuples, inclusive="left") + index = IntervalIndex.from_tuples(tuples, closed="left") result_indexer, result_missing = index.get_indexer_non_unique(query) expected_indexer = np.array(expected[0], dtype="intp") @@ -461,45 +447,45 @@ def test_slice_locs_with_interval(self): assert index.slice_locs(start=Interval(2, 4), end=Interval(0, 2)) == (2, 2) # unsorted duplicates - index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)], "right") + index = 
IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)]) with pytest.raises( KeyError, match=re.escape( '"Cannot get left slice bound for non-unique label: ' - "Interval(0, 2, inclusive='right')\"" + "Interval(0, 2, closed='right')\"" ), ): - index.slice_locs(start=Interval(0, 2, "right"), end=Interval(2, 4, "right")) + index.slice_locs(start=Interval(0, 2), end=Interval(2, 4)) with pytest.raises( KeyError, match=re.escape( '"Cannot get left slice bound for non-unique label: ' - "Interval(0, 2, inclusive='right')\"" + "Interval(0, 2, closed='right')\"" ), ): - index.slice_locs(start=Interval(0, 2, "right")) + index.slice_locs(start=Interval(0, 2)) - assert index.slice_locs(end=Interval(2, 4, "right")) == (0, 2) + assert index.slice_locs(end=Interval(2, 4)) == (0, 2) with pytest.raises( KeyError, match=re.escape( '"Cannot get right slice bound for non-unique label: ' - "Interval(0, 2, inclusive='right')\"" + "Interval(0, 2, closed='right')\"" ), ): - index.slice_locs(end=Interval(0, 2, "right")) + index.slice_locs(end=Interval(0, 2)) with pytest.raises( KeyError, match=re.escape( '"Cannot get right slice bound for non-unique label: ' - "Interval(0, 2, inclusive='right')\"" + "Interval(0, 2, closed='right')\"" ), ): - index.slice_locs(start=Interval(2, 4, "right"), end=Interval(0, 2, "right")) + index.slice_locs(start=Interval(2, 4), end=Interval(0, 2)) # another unsorted duplicates index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)]) @@ -513,7 +499,7 @@ def test_slice_locs_with_interval(self): def test_slice_locs_with_ints_and_floats_succeeds(self): # increasing non-overlapping - index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)], inclusive="right") + index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]) assert index.slice_locs(0, 1) == (0, 1) assert index.slice_locs(0, 2) == (0, 2) @@ -523,7 +509,7 @@ def test_slice_locs_with_ints_and_floats_succeeds(self): assert index.slice_locs(0, 4) == (0, 3) # decreasing non-overlapping - index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)], inclusive="right") + index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)]) assert index.slice_locs(0, 1) == (3, 3) assert index.slice_locs(0, 2) == (3, 2) assert index.slice_locs(0, 3) == (3, 1) @@ -544,7 +530,7 @@ def test_slice_locs_with_ints_and_floats_succeeds(self): ) def test_slice_locs_with_ints_and_floats_errors(self, tuples, query): start, stop = query - index = IntervalIndex.from_tuples(tuples, inclusive="right") + index = IntervalIndex.from_tuples(tuples) with pytest.raises( KeyError, match=( @@ -599,17 +585,17 @@ class TestContains: def test_contains_dunder(self): - index = IntervalIndex.from_arrays([0, 1], [1, 2], inclusive="right") + index = IntervalIndex.from_arrays([0, 1], [1, 2], closed="right") # __contains__ requires perfect matches to intervals. 
assert 0 not in index assert 1 not in index assert 2 not in index - assert Interval(0, 1, inclusive="right") in index - assert Interval(0, 2, inclusive="right") not in index - assert Interval(0, 0.5, inclusive="right") not in index - assert Interval(3, 5, inclusive="right") not in index - assert Interval(-1, 0, inclusive="left") not in index - assert Interval(0, 1, inclusive="left") not in index - assert Interval(0, 1, inclusive="both") not in index + assert Interval(0, 1, closed="right") in index + assert Interval(0, 2, closed="right") not in index + assert Interval(0, 0.5, closed="right") not in index + assert Interval(3, 5, closed="right") not in index + assert Interval(-1, 0, closed="left") not in index + assert Interval(0, 1, closed="left") not in index + assert Interval(0, 1, closed="both") not in index diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 5bf29093152d8..37c13c37d070b 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -28,21 +28,21 @@ def name(request): class TestIntervalIndex: - index = IntervalIndex.from_arrays([0, 1], [1, 2], "right") + index = IntervalIndex.from_arrays([0, 1], [1, 2]) - def create_index(self, inclusive="right"): - return IntervalIndex.from_breaks(range(11), inclusive=inclusive) + def create_index(self, closed="right"): + return IntervalIndex.from_breaks(range(11), closed=closed) - def create_index_with_nan(self, inclusive="right"): + def create_index_with_nan(self, closed="right"): mask = [True, False] + [True] * 8 return IntervalIndex.from_arrays( np.where(mask, np.arange(10), np.nan), np.where(mask, np.arange(1, 11), np.nan), - inclusive=inclusive, + closed=closed, ) def test_properties(self, closed): - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) assert len(index) == 10 assert index.size == 10 assert index.shape == (10,) @@ -51,7 +51,7 @@ def test_properties(self, closed): tm.assert_index_equal(index.right, Index(np.arange(1, 11))) tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5))) - assert index.inclusive == closed + assert index.closed == closed ivs = [ Interval(left, right, closed) @@ -61,7 +61,7 @@ def test_properties(self, closed): tm.assert_numpy_array_equal(np.asarray(index), expected) # with nans - index = self.create_index_with_nan(inclusive=closed) + index = self.create_index_with_nan(closed=closed) assert len(index) == 10 assert index.size == 10 assert index.shape == (10,) @@ -73,7 +73,7 @@ def test_properties(self, closed): tm.assert_index_equal(index.right, expected_right) tm.assert_index_equal(index.mid, expected_mid) - assert index.inclusive == closed + assert index.closed == closed ivs = [ Interval(left, right, closed) if notna(left) else np.nan @@ -93,7 +93,7 @@ def test_properties(self, closed): ) def test_length(self, closed, breaks): # GH 18789 - index = IntervalIndex.from_breaks(breaks, inclusive=closed) + index = IntervalIndex.from_breaks(breaks, closed=closed) result = index.length expected = Index(iv.length for iv in index) tm.assert_index_equal(result, expected) @@ -105,7 +105,7 @@ def test_length(self, closed, breaks): tm.assert_index_equal(result, expected) def test_with_nans(self, closed): - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) assert index.hasnans is False result = index.isna() @@ -116,7 +116,7 @@ def test_with_nans(self, closed): expected = np.ones(len(index), dtype=bool) 
tm.assert_numpy_array_equal(result, expected) - index = self.create_index_with_nan(inclusive=closed) + index = self.create_index_with_nan(closed=closed) assert index.hasnans is True result = index.isna() @@ -128,7 +128,7 @@ def test_with_nans(self, closed): tm.assert_numpy_array_equal(result, expected) def test_copy(self, closed): - expected = self.create_index(inclusive=closed) + expected = self.create_index(closed=closed) result = expected.copy() assert result.equals(expected) @@ -141,7 +141,7 @@ def test_ensure_copied_data(self, closed): # exercise the copy flag in the constructor # not copying - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) result = IntervalIndex(index, copy=False) tm.assert_numpy_array_equal( index.left.values, result.left.values, check_same="same" @@ -160,17 +160,17 @@ def test_ensure_copied_data(self, closed): ) def test_delete(self, closed): - expected = IntervalIndex.from_breaks(np.arange(1, 11), inclusive=closed) - result = self.create_index(inclusive=closed).delete(0) + expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed) + result = self.create_index(closed=closed).delete(0) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "data", [ - interval_range(0, periods=10, inclusive="neither"), - interval_range(1.7, periods=8, freq=2.5, inclusive="both"), - interval_range(Timestamp("20170101"), periods=12, inclusive="left"), - interval_range(Timedelta("1 day"), periods=6, inclusive="right"), + interval_range(0, periods=10, closed="neither"), + interval_range(1.7, periods=8, freq=2.5, closed="both"), + interval_range(Timestamp("20170101"), periods=12, closed="left"), + interval_range(Timedelta("1 day"), periods=6, closed="right"), ], ) def test_insert(self, data): @@ -201,11 +201,11 @@ def test_insert(self, data): with pytest.raises(TypeError, match=msg): data._data.insert(1, "foo") - # invalid inclusive - msg = "'value.inclusive' is 'left', expected 'right'." - for inclusive in {"left", "right", "both", "neither"} - {item.inclusive}: - msg = f"'value.inclusive' is '{inclusive}', expected '{item.inclusive}'." - bad_item = Interval(item.left, item.right, inclusive=inclusive) + # invalid closed + msg = "'value.closed' is 'left', expected 'right'." + for closed in {"left", "right", "both", "neither"} - {item.closed}: + msg = f"'value.closed' is '{closed}', expected '{item.closed}'." 
+ bad_item = Interval(item.left, item.right, closed=closed) res = data.insert(1, bad_item) expected = data.astype(object).insert(1, bad_item) tm.assert_index_equal(res, expected) @@ -213,7 +213,7 @@ def test_insert(self, data): data._data.insert(1, bad_item) # GH 18295 (test missing) - na_idx = IntervalIndex([np.nan], inclusive=data.inclusive) + na_idx = IntervalIndex([np.nan], closed=data.closed) for na in [np.nan, None, pd.NA]: expected = data[:1].append(na_idx).append(data[1:]) result = data.insert(1, na) @@ -235,93 +235,93 @@ def test_is_unique_interval(self, closed): Interval specific tests for is_unique in addition to base class tests """ # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) assert idx.is_unique is True # unique overlapping - shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_unique is True # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], inclusive=closed) + idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) assert idx.is_unique is True # unique NaN - idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], inclusive=closed) + idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], closed=closed) assert idx.is_unique is True # non-unique NaN idx = IntervalIndex.from_tuples( - [(np.NaN, np.NaN), (np.NaN, np.NaN)], inclusive=closed + [(np.NaN, np.NaN), (np.NaN, np.NaN)], closed=closed ) assert idx.is_unique is False def test_monotonic(self, closed): # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], inclusive=closed) + idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], closed=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is True # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], closed=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], closed=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], inclusive=closed) + idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], closed=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert 
idx._is_strictly_monotonic_decreasing is True # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], closed=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # increasing overlapping shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is False assert idx._is_strictly_monotonic_decreasing is False # decreasing overlapping shared endpoints - idx = IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], inclusive=closed) + idx = IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], closed=closed) assert idx.is_monotonic_increasing is False assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is True # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], inclusive=closed) + idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is False assert idx.is_monotonic_decreasing is True assert idx._is_strictly_monotonic_decreasing is False # empty - idx = IntervalIndex([], inclusive=closed) + idx = IntervalIndex([], closed=closed) assert idx.is_monotonic_increasing is True assert idx._is_strictly_monotonic_increasing is True assert idx.is_monotonic_decreasing is True @@ -338,22 +338,22 @@ def test_is_monotonic_with_nans(self): assert not index.is_monotonic_decreasing def test_get_item(self, closed): - i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), inclusive=closed) - assert i[0] == Interval(0.0, 1.0, inclusive=closed) - assert i[1] == Interval(1.0, 2.0, inclusive=closed) + i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed) + assert i[0] == Interval(0.0, 1.0, closed=closed) + assert i[1] == Interval(1.0, 2.0, closed=closed) assert isna(i[2]) result = i[0:1] - expected = IntervalIndex.from_arrays((0.0,), (1.0,), inclusive=closed) + expected = IntervalIndex.from_arrays((0.0,), (1.0,), closed=closed) tm.assert_index_equal(result, expected) result = i[0:2] - expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), inclusive=closed) + expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), closed=closed) tm.assert_index_equal(result, expected) result = i[1:3] expected = IntervalIndex.from_arrays( - (1.0, np.nan), (2.0, np.nan), inclusive=closed + (1.0, np.nan), (2.0, np.nan), closed=closed ) tm.assert_index_equal(result, expected) @@ -477,7 +477,7 @@ def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key): def test_contains_method(self): # can select values that are IN the range of a value - i = IntervalIndex.from_arrays([0, 1], [1, 2], "right") + i = IntervalIndex.from_arrays([0, 1], [1, 2]) expected = np.array([False, False], dtype="bool") actual = i.contains(0) @@ -500,18 +500,18 @@ def test_contains_method(self): def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], inclusive=closed) + expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed) - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), 
np.nan], inclusive=closed) + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], inclusive=closed) + ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) def test_non_contiguous(self, closed): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)], inclusive=closed) + index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) expected = np.array([0, -1, 1], dtype="intp") @@ -520,7 +520,7 @@ def test_non_contiguous(self, closed): assert 1.5 not in index def test_isin(self, closed): - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) expected = np.array([True] + [False] * (len(index) - 1)) result = index.isin(index[:1]) @@ -529,7 +529,7 @@ def test_isin(self, closed): result = index.isin([index[0]]) tm.assert_numpy_array_equal(result, expected) - other = IntervalIndex.from_breaks(np.arange(-2, 10), inclusive=closed) + other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed) expected = np.array([True] * (len(index) - 1) + [False]) result = index.isin(other) tm.assert_numpy_array_equal(result, expected) @@ -537,9 +537,9 @@ def test_isin(self, closed): result = index.isin(other.tolist()) tm.assert_numpy_array_equal(result, expected) - for other_inclusive in {"right", "left", "both", "neither"}: - other = self.create_index(inclusive=other_inclusive) - expected = np.repeat(closed == other_inclusive, len(index)) + for other_closed in {"right", "left", "both", "neither"}: + other = self.create_index(closed=other_closed) + expected = np.repeat(closed == other_closed, len(index)) result = index.isin(other) tm.assert_numpy_array_equal(result, expected) @@ -547,14 +547,14 @@ def test_isin(self, closed): tm.assert_numpy_array_equal(result, expected) def test_comparison(self): - actual = Interval(0, 1, "right") < self.index + actual = Interval(0, 1) < self.index expected = np.array([False, True]) tm.assert_numpy_array_equal(actual, expected) - actual = Interval(0.5, 1.5, "right") < self.index + actual = Interval(0.5, 1.5) < self.index expected = np.array([False, True]) tm.assert_numpy_array_equal(actual, expected) - actual = self.index > Interval(0.5, 1.5, "right") + actual = self.index > Interval(0.5, 1.5) tm.assert_numpy_array_equal(actual, expected) actual = self.index == self.index @@ -612,11 +612,9 @@ def test_comparison(self): def test_missing_values(self, closed): idx = Index( - [np.nan, Interval(0, 1, inclusive=closed), Interval(1, 2, inclusive=closed)] - ) - idx2 = IntervalIndex.from_arrays( - [np.nan, 0, 1], [np.nan, 1, 2], inclusive=closed + [np.nan, Interval(0, 1, closed=closed), Interval(1, 2, closed=closed)] ) + idx2 = IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) msg = ( @@ -625,13 +623,13 @@ def test_missing_values(self, closed): ) with pytest.raises(ValueError, match=msg): IntervalIndex.from_arrays( - [np.nan, 0, 1], np.array([0, 1, 2]), inclusive=closed + [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed ) tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) def test_sort_values(self, closed): - index = self.create_index(inclusive=closed) + index = self.create_index(closed=closed) result = index.sort_values() tm.assert_index_equal(result, index) @@ -654,7 +652,7 @@ def 
test_sort_values(self, closed): def test_datetime(self, tz): start = Timestamp("2000-01-01", tz=tz) dates = date_range(start=start, periods=10) - index = IntervalIndex.from_breaks(dates, "right") + index = IntervalIndex.from_breaks(dates) # test mid start = Timestamp("2000-01-01T12:00", tz=tz) @@ -666,10 +664,10 @@ def test_datetime(self, tz): assert Timestamp("2000-01-01T12", tz=tz) not in index assert Timestamp("2000-01-02", tz=tz) not in index iv_true = Interval( - Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz), "right" + Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz) ) iv_false = Interval( - Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz), "right" + Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz) ) assert iv_true in index assert iv_false not in index @@ -694,62 +692,58 @@ def test_datetime(self, tz): def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 2], inclusive=closed) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3], inclusive=closed) + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) result = index1.append(index2) - expected = IntervalIndex.from_arrays( - [0, 1, 1, 2], [1, 2, 2, 3], inclusive=closed - ) + expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) result = index1.append([index1, index2]) expected = IntervalIndex.from_arrays( - [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], inclusive=closed + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed ) tm.assert_index_equal(result, expected) - for other_inclusive in {"left", "right", "both", "neither"} - {closed}: - index_other_inclusive = IntervalIndex.from_arrays( - [0, 1], [1, 2], inclusive=other_inclusive - ) - result = index1.append(index_other_inclusive) - expected = index1.astype(object).append( - index_other_inclusive.astype(object) + for other_closed in {"left", "right", "both", "neither"} - {closed}: + index_other_closed = IntervalIndex.from_arrays( + [0, 1], [1, 2], closed=other_closed ) + result = index1.append(index_other_closed) + expected = index1.astype(object).append(index_other_closed.astype(object)) tm.assert_index_equal(result, expected) def test_is_non_overlapping_monotonic(self, closed): # Should be True in all cases tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, inclusive=closed) + idx = IntervalIndex.from_tuples(tpls, closed=closed) assert idx.is_non_overlapping_monotonic is True - idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) assert idx.is_non_overlapping_monotonic is True # Should be False in all cases (overlapping) tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, inclusive=closed) + idx = IntervalIndex.from_tuples(tpls, closed=closed) assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) assert idx.is_non_overlapping_monotonic is False # Should be False in all cases (non-monotonic) tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - idx = IntervalIndex.from_tuples(tpls, inclusive=closed) + idx = IntervalIndex.from_tuples(tpls, closed=closed) assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(tpls[::-1], inclusive=closed) + idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) assert 
idx.is_non_overlapping_monotonic is False - # Should be False for inclusive='both', otherwise True (GH16560) + # Should be False for closed='both', otherwise True (GH16560) if closed == "both": - idx = IntervalIndex.from_breaks(range(4), inclusive=closed) + idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is False else: - idx = IntervalIndex.from_breaks(range(4), inclusive=closed) + idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is True @pytest.mark.parametrize( @@ -766,34 +760,34 @@ def test_is_overlapping(self, start, shift, na_value, closed): # non-overlapping tuples = [(start + n * shift, start + (n + 1) * shift) for n in (0, 2, 4)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) assert index.is_overlapping is False # non-overlapping with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) assert index.is_overlapping is False # overlapping tuples = [(start + n * shift, start + (n + 2) * shift) for n in range(3)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) assert index.is_overlapping is True # overlapping with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) assert index.is_overlapping is True # common endpoints tuples = [(start + n * shift, start + (n + 1) * shift) for n in range(3)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) result = index.is_overlapping expected = closed == "both" assert result is expected # common endpoints with NA tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, inclusive=closed) + index = IntervalIndex.from_tuples(tuples, closed=closed) result = index.is_overlapping assert result is expected @@ -871,21 +865,21 @@ def test_nbytes(self): expected = 64 # 4 * 8 * 2 assert result == expected - @pytest.mark.parametrize("new_inclusive", ["left", "right", "both", "neither"]) - def test_set_inclusive(self, name, closed, new_inclusive): + @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) + def test_set_closed(self, name, closed, new_closed): # GH 21670 - index = interval_range(0, 5, inclusive=closed, name=name) - result = index.set_inclusive(new_inclusive) - expected = interval_range(0, 5, inclusive=new_inclusive, name=name) + index = interval_range(0, 5, closed=closed, name=name) + result = index.set_closed(new_closed) + expected = interval_range(0, 5, closed=new_closed, name=name) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("bad_inclusive", ["foo", 10, "LEFT", True, False]) - def test_set_inclusive_errors(self, bad_inclusive): + @pytest.mark.parametrize("bad_closed", ["foo", 10, "LEFT", True, False]) + def test_set_closed_errors(self, bad_closed): # GH 21670 index = interval_range(0, 5) - msg = f"invalid option for 'inclusive': {bad_inclusive}" + msg = f"invalid option for 'closed': {bad_closed}" with pytest.raises(ValueError, match=msg): - index.set_inclusive(bad_inclusive) + index.set_closed(bad_closed) def test_is_all_dates(self): # GH 23576 @@ -895,35 +889,6 @@ def 
test_is_all_dates(self): year_2017_index = IntervalIndex([year_2017]) assert not year_2017_index._is_all_dates - def test_interval_index_error_and_warning(self): - # GH 40245 - msg = "Can only specify 'closed' or 'inclusive', not both." - msg_warn = "the 'closed'' keyword is deprecated, use 'inclusive' instead." - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_breaks(range(11), closed="both", inclusive="both") - - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_arrays( - [0, 1], [1, 2], closed="both", inclusive="both" - ) - - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_tuples( - [(0, 1), (0.5, 1.5)], closed="both", inclusive="both" - ) - - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_breaks(range(11), closed="both") - - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_arrays([0, 1], [1, 2], closed="both") - - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed="both") - def test_dir(): # GH#27571 dir(interval_index) should not raise @@ -951,9 +916,3 @@ def test_searchsorted_invalid_argument(arg): msg = "'<' not supported between instances of 'pandas._libs.interval.Interval' and " with pytest.raises(TypeError, match=msg): values.searchsorted(arg) - - -def test_interval_range_deprecated_closed(): - # GH#40245 - with tm.assert_produces_warning(FutureWarning): - interval_range(start=0, end=5, closed="right") diff --git a/pandas/tests/indexes/interval/test_interval_range.py b/pandas/tests/indexes/interval/test_interval_range.py index 3bde2f51178dc..2f28c33a3bbc6 100644 --- a/pandas/tests/indexes/interval/test_interval_range.py +++ b/pandas/tests/indexes/interval/test_interval_range.py @@ -30,29 +30,29 @@ class TestIntervalRange: def test_constructor_numeric(self, closed, name, freq, periods): start, end = 0, 100 breaks = np.arange(101, step=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, inclusive=closed) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) # defined from start/end/freq result = interval_range( - start=start, end=end, freq=freq, name=name, inclusive=closed + start=start, end=end, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from start/periods/freq result = interval_range( - start=start, periods=periods, freq=freq, name=name, inclusive=closed + start=start, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from end/periods/freq result = interval_range( - end=end, periods=periods, freq=freq, name=name, inclusive=closed + end=end, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # GH 20976: linspace behavior defined from start/end/periods result = interval_range( - start=start, end=end, periods=periods, name=name, inclusive=closed + start=start, end=end, periods=periods, name=name, closed=closed ) tm.assert_index_equal(result, expected) @@ -63,23 +63,23 @@ def test_constructor_numeric(self, closed, name, freq, periods): def test_constructor_timestamp(self, closed, name, freq, periods, tz): start, end = Timestamp("20180101", tz=tz), Timestamp("20181231", tz=tz) breaks = date_range(start=start, end=end, freq=freq) - 
expected = IntervalIndex.from_breaks(breaks, name=name, inclusive=closed) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) # defined from start/end/freq result = interval_range( - start=start, end=end, freq=freq, name=name, inclusive=closed + start=start, end=end, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from start/periods/freq result = interval_range( - start=start, periods=periods, freq=freq, name=name, inclusive=closed + start=start, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from end/periods/freq result = interval_range( - end=end, periods=periods, freq=freq, name=name, inclusive=closed + end=end, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) @@ -88,7 +88,7 @@ def test_constructor_timestamp(self, closed, name, freq, periods, tz): # matches expected only for non-anchored offsets and tz naive # (anchored/DST transitions cause unequal spacing in expected) result = interval_range( - start=start, end=end, periods=periods, name=name, inclusive=closed + start=start, end=end, periods=periods, name=name, closed=closed ) tm.assert_index_equal(result, expected) @@ -98,29 +98,29 @@ def test_constructor_timestamp(self, closed, name, freq, periods, tz): def test_constructor_timedelta(self, closed, name, freq, periods): start, end = Timedelta("0 days"), Timedelta("100 days") breaks = timedelta_range(start=start, end=end, freq=freq) - expected = IntervalIndex.from_breaks(breaks, name=name, inclusive=closed) + expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed) # defined from start/end/freq result = interval_range( - start=start, end=end, freq=freq, name=name, inclusive=closed + start=start, end=end, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from start/periods/freq result = interval_range( - start=start, periods=periods, freq=freq, name=name, inclusive=closed + start=start, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # defined from end/periods/freq result = interval_range( - end=end, periods=periods, freq=freq, name=name, inclusive=closed + end=end, periods=periods, freq=freq, name=name, closed=closed ) tm.assert_index_equal(result, expected) # GH 20976: linspace behavior defined from start/end/periods result = interval_range( - start=start, end=end, periods=periods, name=name, inclusive=closed + start=start, end=end, periods=periods, name=name, closed=closed ) tm.assert_index_equal(result, expected) @@ -161,11 +161,9 @@ def test_no_invalid_float_truncation(self, start, end, freq): breaks = [0.5, 1.5, 2.5, 3.5, 4.5] else: breaks = [0.5, 2.0, 3.5, 5.0, 6.5] - expected = IntervalIndex.from_breaks(breaks, "right") + expected = IntervalIndex.from_breaks(breaks) - result = interval_range( - start=start, end=end, periods=4, freq=freq, inclusive="right" - ) + result = interval_range(start=start, end=end, periods=4, freq=freq) tm.assert_index_equal(result, expected) @pytest.mark.parametrize( @@ -186,9 +184,8 @@ def test_no_invalid_float_truncation(self, start, end, freq): def test_linspace_dst_transition(self, start, mid, end): # GH 20976: linspace behavior defined from start/end/periods # accounts for the hour gained/lost during DST transition - result = interval_range(start=start, end=end, periods=2, inclusive="right") - expected = IntervalIndex.from_breaks([start, mid, end], "right") - + result = 
interval_range(start=start, end=end, periods=2) + expected = IntervalIndex.from_breaks([start, mid, end]) tm.assert_index_equal(result, expected) @pytest.mark.parametrize("freq", [2, 2.0]) @@ -337,7 +334,7 @@ def test_errors(self): # invalid end msg = r"end must be numeric or datetime-like, got \(0, 1\]" with pytest.raises(ValueError, match=msg): - interval_range(end=Interval(0, 1, "right"), periods=10) + interval_range(end=Interval(0, 1), periods=10) # invalid freq for datetime-like msg = "freq must be numeric or convertible to DateOffset, got foo" @@ -356,17 +353,3 @@ def test_errors(self): msg = "Start and end cannot both be tz-aware with different timezones" with pytest.raises(TypeError, match=msg): interval_range(start=start, end=end) - - def test_interval_range_error_and_warning(self): - # GH 40245 - - msg = "Can only specify 'closed' or 'inclusive', not both." - msg_warn = "the 'closed'' keyword is deprecated, use 'inclusive' instead." - - with pytest.raises(TypeError, match=msg): - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - interval_range(end=5, periods=4, closed="both", inclusive="both") - - msg = "the 'closed'' keyword is deprecated, use 'inclusive' instead." - with tm.assert_produces_warning(FutureWarning, match=msg_warn): - interval_range(end=5, periods=4, closed="right") diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 6c30d16e61582..3b9de8d9e45d9 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -42,7 +42,7 @@ def leaf_size(request): ) def tree(request, leaf_size): left = request.param - return IntervalTree(left, left + 2, leaf_size=leaf_size, inclusive="right") + return IntervalTree(left, left + 2, leaf_size=leaf_size) class TestIntervalTree: @@ -129,7 +129,7 @@ def test_get_indexer_closed(self, closed, leaf_size): found = x.astype("intp") not_found = (-1 * np.ones(1000)).astype("intp") - tree = IntervalTree(x, x + 0.5, inclusive=closed, leaf_size=leaf_size) + tree = IntervalTree(x, x + 0.5, closed=closed, leaf_size=leaf_size) tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25)) expected = found if tree.closed_left else not_found @@ -151,7 +151,7 @@ def test_get_indexer_closed(self, closed, leaf_size): @pytest.mark.parametrize("order", (list(x) for x in permutations(range(3)))) def test_is_overlapping(self, closed, order, left, right, expected): # GH 23309 - tree = IntervalTree(left[order], right[order], inclusive=closed) + tree = IntervalTree(left[order], right[order], closed=closed) result = tree.is_overlapping assert result is expected @@ -160,7 +160,7 @@ def test_is_overlapping_endpoints(self, closed, order): """shared endpoints are marked as overlapping""" # GH 23309 left, right = np.arange(3, dtype="int64"), np.arange(1, 4) - tree = IntervalTree(left[order], right[order], inclusive=closed) + tree = IntervalTree(left[order], right[order], closed=closed) result = tree.is_overlapping expected = closed == "both" assert result is expected @@ -176,7 +176,7 @@ def test_is_overlapping_endpoints(self, closed, order): ) def test_is_overlapping_trivial(self, closed, left, right): # GH 23309 - tree = IntervalTree(left, right, inclusive=closed) + tree = IntervalTree(left, right, closed=closed) assert tree.is_overlapping is False @pytest.mark.skipif(not IS64, reason="GH 23440") diff --git a/pandas/tests/indexes/interval/test_pickle.py b/pandas/tests/indexes/interval/test_pickle.py index 
ef6db9c8a0513..308a90e72eab5 100644 --- a/pandas/tests/indexes/interval/test_pickle.py +++ b/pandas/tests/indexes/interval/test_pickle.py @@ -1,10 +1,13 @@ +import pytest + from pandas import IntervalIndex import pandas._testing as tm class TestPickle: - def test_pickle_round_trip_inclusive(self, closed): + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): # https://github.com/pandas-dev/pandas/issues/35658 - idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], inclusive=closed) + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index 2e1f6f7925374..059b0b75f4190 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -10,22 +10,20 @@ import pandas._testing as tm -def monotonic_index(start, end, dtype="int64", inclusive="right"): - return IntervalIndex.from_breaks( - np.arange(start, end, dtype=dtype), inclusive=inclusive - ) +def monotonic_index(start, end, dtype="int64", closed="right"): + return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed) -def empty_index(dtype="int64", inclusive="right"): - return IntervalIndex(np.array([], dtype=dtype), inclusive=inclusive) +def empty_index(dtype="int64", closed="right"): + return IntervalIndex(np.array([], dtype=dtype), closed=closed) class TestIntervalIndex: def test_union(self, closed, sort): - index = monotonic_index(0, 11, inclusive=closed) - other = monotonic_index(5, 13, inclusive=closed) + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 13, closed=closed) - expected = monotonic_index(0, 13, inclusive=closed) + expected = monotonic_index(0, 13, closed=closed) result = index[::-1].union(other, sort=sort) if sort is None: tm.assert_index_equal(result, expected) @@ -41,12 +39,12 @@ def test_union(self, closed, sort): def test_union_empty_result(self, closed, sort): # GH 19101: empty result, same dtype - index = empty_index(dtype="int64", inclusive=closed) + index = empty_index(dtype="int64", closed=closed) result = index.union(index, sort=sort) tm.assert_index_equal(result, index) # GH 19101: empty result, different numeric dtypes -> common dtype is f8 - other = empty_index(dtype="float64", inclusive=closed) + other = empty_index(dtype="float64", closed=closed) result = index.union(other, sort=sort) expected = other tm.assert_index_equal(result, expected) @@ -54,7 +52,7 @@ def test_union_empty_result(self, closed, sort): other = index.union(index, sort=sort) tm.assert_index_equal(result, expected) - other = empty_index(dtype="uint64", inclusive=closed) + other = empty_index(dtype="uint64", closed=closed) result = index.union(other, sort=sort) tm.assert_index_equal(result, expected) @@ -62,10 +60,10 @@ def test_union_empty_result(self, closed, sort): tm.assert_index_equal(result, expected) def test_intersection(self, closed, sort): - index = monotonic_index(0, 11, inclusive=closed) - other = monotonic_index(5, 13, inclusive=closed) + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 13, closed=closed) - expected = monotonic_index(5, 11, inclusive=closed) + expected = monotonic_index(5, 11, closed=closed) result = index[::-1].intersection(other, sort=sort) if sort is None: tm.assert_index_equal(result, expected) @@ -100,21 +98,21 @@ def test_intersection(self, 
closed, sort): tm.assert_index_equal(result, expected) def test_intersection_empty_result(self, closed, sort): - index = monotonic_index(0, 11, inclusive=closed) + index = monotonic_index(0, 11, closed=closed) # GH 19101: empty result, same dtype - other = monotonic_index(300, 314, inclusive=closed) - expected = empty_index(dtype="int64", inclusive=closed) + other = monotonic_index(300, 314, closed=closed) + expected = empty_index(dtype="int64", closed=closed) result = index.intersection(other, sort=sort) tm.assert_index_equal(result, expected) # GH 19101: empty result, different numeric dtypes -> common dtype is float64 - other = monotonic_index(300, 314, dtype="float64", inclusive=closed) + other = monotonic_index(300, 314, dtype="float64", closed=closed) result = index.intersection(other, sort=sort) expected = other[:0] tm.assert_index_equal(result, expected) - other = monotonic_index(300, 314, dtype="uint64", inclusive=closed) + other = monotonic_index(300, 314, dtype="uint64", closed=closed) result = index.intersection(other, sort=sort) tm.assert_index_equal(result, expected) @@ -127,7 +125,7 @@ def test_intersection_duplicates(self): tm.assert_index_equal(result, expected) def test_difference(self, closed, sort): - index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], inclusive=closed) + index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed) result = index.difference(index[:1], sort=sort) expected = index[1:] if sort is None: @@ -136,18 +134,18 @@ def test_difference(self, closed, sort): # GH 19101: empty result, same dtype result = index.difference(index, sort=sort) - expected = empty_index(dtype="int64", inclusive=closed) + expected = empty_index(dtype="int64", closed=closed) tm.assert_index_equal(result, expected) # GH 19101: empty result, different dtypes other = IntervalIndex.from_arrays( - index.left.astype("float64"), index.right, inclusive=closed + index.left.astype("float64"), index.right, closed=closed ) result = index.difference(other, sort=sort) tm.assert_index_equal(result, expected) def test_symmetric_difference(self, closed, sort): - index = monotonic_index(0, 11, inclusive=closed) + index = monotonic_index(0, 11, closed=closed) result = index[1:].symmetric_difference(index[:-1], sort=sort) expected = IntervalIndex([index[0], index[-1]]) if sort is None: @@ -156,17 +154,17 @@ def test_symmetric_difference(self, closed, sort): # GH 19101: empty result, same dtype result = index.symmetric_difference(index, sort=sort) - expected = empty_index(dtype="int64", inclusive=closed) + expected = empty_index(dtype="int64", closed=closed) if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) # GH 19101: empty result, different dtypes other = IntervalIndex.from_arrays( - index.left.astype("float64"), index.right, inclusive=closed + index.left.astype("float64"), index.right, closed=closed ) result = index.symmetric_difference(other, sort=sort) - expected = empty_index(dtype="float64", inclusive=closed) + expected = empty_index(dtype="float64", closed=closed) tm.assert_index_equal(result, expected) @pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning") @@ -174,7 +172,7 @@ def test_symmetric_difference(self, closed, sort): "op_name", ["union", "intersection", "difference", "symmetric_difference"] ) def test_set_incompatible_types(self, closed, op_name, sort): - index = monotonic_index(0, 11, inclusive=closed) + index = monotonic_index(0, 11, closed=closed) set_op = getattr(index, op_name) 
# TODO: standardize return type of non-union setops type(self vs other) @@ -187,8 +185,8 @@ def test_set_incompatible_types(self, closed, op_name, sort): tm.assert_index_equal(result, expected) # mixed closed -> cast to object - for other_inclusive in {"right", "left", "both", "neither"} - {closed}: - other = monotonic_index(0, 11, inclusive=other_inclusive) + for other_closed in {"right", "left", "both", "neither"} - {closed}: + other = monotonic_index(0, 11, closed=other_closed) expected = getattr(index.astype(object), op_name)(other, sort=sort) if op_name == "difference": expected = index @@ -196,7 +194,7 @@ def test_set_incompatible_types(self, closed, op_name, sort): tm.assert_index_equal(result, expected) # GH 19016: incompatible dtypes -> cast to object - other = interval_range(Timestamp("20180101"), periods=9, inclusive=closed) + other = interval_range(Timestamp("20180101"), periods=9, closed=closed) expected = getattr(index.astype(object), op_name)(other, sort=sort) if op_name == "difference": expected = index diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 5d7fc23feb5a8..43b893b084672 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1432,10 +1432,10 @@ def test_ensure_index_from_sequences(self, data, names, expected): def test_ensure_index_mixed_closed_intervals(self): # GH27172 intervals = [ - pd.Interval(0, 1, inclusive="left"), - pd.Interval(1, 2, inclusive="right"), - pd.Interval(2, 3, inclusive="neither"), - pd.Interval(3, 4, inclusive="both"), + pd.Interval(0, 1, closed="left"), + pd.Interval(1, 2, closed="right"), + pd.Interval(2, 3, closed="neither"), + pd.Interval(3, 4, closed="both"), ] result = ensure_index(intervals) expected = Index(intervals, dtype=object) diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 7d1f1ef09fc5d..db3a569d3925b 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -13,7 +13,7 @@ class TestIntervalIndex: @pytest.fixture def series_with_interval_index(self): - return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6), "right")) + return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) def test_getitem_with_scalar(self, series_with_interval_index, indexer_sl): @@ -40,7 +40,7 @@ def test_getitem_nonoverlapping_monotonic(self, direction, closed, indexer_sl): if direction == "decreasing": tpls = tpls[::-1] - idx = IntervalIndex.from_tuples(tpls, inclusive=closed) + idx = IntervalIndex.from_tuples(tpls, closed=closed) ser = Series(list("abc"), idx) for key, expected in zip(idx.left, ser): diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py index 602f45d637afb..4b89232f9fb12 100644 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ b/pandas/tests/indexing/interval/test_interval_new.py @@ -17,9 +17,7 @@ class TestIntervalIndex: @pytest.fixture def series_with_interval_index(self): - return Series( - np.arange(5), IntervalIndex.from_breaks(np.arange(6), inclusive="right") - ) + return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6))) def test_loc_with_interval(self, series_with_interval_index, indexer_sl): @@ -30,33 +28,27 @@ def test_loc_with_interval(self, series_with_interval_index, indexer_sl): ser = series_with_interval_index.copy() expected = 0 - result = indexer_sl(ser)[Interval(0, 1, "right")] + result = 
indexer_sl(ser)[Interval(0, 1)] assert result == expected expected = ser.iloc[3:5] - result = indexer_sl(ser)[[Interval(3, 4, "right"), Interval(4, 5, "right")]] + result = indexer_sl(ser)[[Interval(3, 4), Interval(4, 5)]] tm.assert_series_equal(expected, result) # missing or not exact - with pytest.raises( - KeyError, match=re.escape("Interval(3, 5, inclusive='left')") - ): - indexer_sl(ser)[Interval(3, 5, inclusive="left")] + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")): + indexer_sl(ser)[Interval(3, 5, closed="left")] - with pytest.raises( - KeyError, match=re.escape("Interval(3, 5, inclusive='right')") - ): - indexer_sl(ser)[Interval(3, 5, "right")] + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): + indexer_sl(ser)[Interval(3, 5)] with pytest.raises( - KeyError, match=re.escape("Interval(-2, 0, inclusive='right')") + KeyError, match=re.escape("Interval(-2, 0, closed='right')") ): - indexer_sl(ser)[Interval(-2, 0, "right")] + indexer_sl(ser)[Interval(-2, 0)] - with pytest.raises( - KeyError, match=re.escape("Interval(5, 6, inclusive='right')") - ): - indexer_sl(ser)[Interval(5, 6, "right")] + with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")): + indexer_sl(ser)[Interval(5, 6)] def test_loc_with_scalar(self, series_with_interval_index, indexer_sl): @@ -95,11 +87,11 @@ def test_loc_with_slices(self, series_with_interval_index, indexer_sl): # slice of interval expected = ser.iloc[:3] - result = indexer_sl(ser)[Interval(0, 1, "right") : Interval(2, 3, "right")] + result = indexer_sl(ser)[Interval(0, 1) : Interval(2, 3)] tm.assert_series_equal(expected, result) expected = ser.iloc[3:] - result = indexer_sl(ser)[Interval(3, 4, "right") :] + result = indexer_sl(ser)[Interval(3, 4) :] tm.assert_series_equal(expected, result) msg = "Interval objects are not currently supported" @@ -107,7 +99,7 @@ def test_loc_with_slices(self, series_with_interval_index, indexer_sl): indexer_sl(ser)[Interval(3, 6) :] with pytest.raises(NotImplementedError, match=msg): - indexer_sl(ser)[Interval(3, 4, inclusive="left") :] + indexer_sl(ser)[Interval(3, 4, closed="left") :] def test_slice_step_ne1(self, series_with_interval_index): # GH#31658 slice of scalar with step != 1 @@ -138,7 +130,7 @@ def test_slice_interval_step(self, series_with_interval_index): def test_loc_with_overlap(self, indexer_sl): - idx = IntervalIndex.from_tuples([(1, 5), (3, 7)], inclusive="right") + idx = IntervalIndex.from_tuples([(1, 5), (3, 7)]) ser = Series(range(len(idx)), index=idx) # scalar @@ -151,25 +143,23 @@ def test_loc_with_overlap(self, indexer_sl): # interval expected = 0 - result = indexer_sl(ser)[Interval(1, 5, "right")] + result = indexer_sl(ser)[Interval(1, 5)] result == expected expected = ser - result = indexer_sl(ser)[[Interval(1, 5, "right"), Interval(3, 7, "right")]] + result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]] tm.assert_series_equal(expected, result) - with pytest.raises( - KeyError, match=re.escape("Interval(3, 5, inclusive='right')") - ): - indexer_sl(ser)[Interval(3, 5, "right")] + with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")): + indexer_sl(ser)[Interval(3, 5)] - msg = r"None of \[\[Interval\(3, 5, inclusive='right'\)\]\]" + msg = r"None of \[\[Interval\(3, 5, closed='right'\)\]\]" with pytest.raises(KeyError, match=msg): - indexer_sl(ser)[[Interval(3, 5, "right")]] + indexer_sl(ser)[[Interval(3, 5)]] # slices with interval (only exact matches) expected = ser - result = 
indexer_sl(ser)[Interval(1, 5, "right") : Interval(3, 7, "right")] + result = indexer_sl(ser)[Interval(1, 5) : Interval(3, 7)] tm.assert_series_equal(expected, result) msg = "'can only get slices from an IntervalIndex if bounds are" diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index 21a14ef8523f1..b94323e975cd7 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -114,7 +114,7 @@ def test_slicing(self): df = DataFrame({"value": (np.arange(100) + 1).astype("int64")}) df["D"] = pd.cut(df.value, bins=[0, 25, 50, 75, 100]) - expected = Series([11, Interval(0, 25, "right")], index=["value", "D"], name=10) + expected = Series([11, Interval(0, 25)], index=["value", "D"], name=10) result = df.iloc[10] tm.assert_series_equal(result, expected) @@ -126,7 +126,7 @@ def test_slicing(self): result = df.iloc[10:20] tm.assert_frame_equal(result, expected) - expected = Series([9, Interval(0, 25, "right")], index=["value", "D"], name=8) + expected = Series([9, Interval(0, 25)], index=["value", "D"], name=8) result = df.loc[8] tm.assert_series_equal(result, expected) @@ -495,13 +495,13 @@ def test_loc_and_at_with_categorical_index(self): # numpy object np.array([1, "b", 3.5], dtype=object), # pandas scalars - [Interval(1, 4, "right"), Interval(4, 6, "right"), Interval(6, 9, "right")], + [Interval(1, 4), Interval(4, 6), Interval(6, 9)], [Timestamp(2019, 1, 1), Timestamp(2019, 2, 1), Timestamp(2019, 3, 1)], [Timedelta(1, "d"), Timedelta(2, "d"), Timedelta(3, "D")], # pandas Integer arrays *(pd.array([1, 2, 3], dtype=dtype) for dtype in tm.ALL_INT_EA_DTYPES), # other pandas arrays - pd.IntervalIndex.from_breaks([1, 4, 6, 9], "right").array, + pd.IntervalIndex.from_breaks([1, 4, 6, 9]).array, pd.date_range("2019-01-01", periods=3).array, pd.timedelta_range(start="1d", periods=3).array, ], diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index be8fcfb4d8348..2d54a9ba370ca 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -701,7 +701,7 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): 1.1, 1 + 1j, True, - pd.Interval(1, 2, inclusive="left"), + pd.Interval(1, 2, closed="left"), pd.Timestamp("2012-01-01", tz="US/Eastern"), pd.Timestamp("2012-01-01"), pd.Timedelta(days=1), @@ -709,7 +709,7 @@ def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype): ], ) def test_fillna_interval(self, index_or_series, fill_val): - ii = pd.interval_range(1.0, 5.0, inclusive="right").insert(1, np.nan) + ii = pd.interval_range(1.0, 5.0, closed="right").insert(1, np.nan) assert isinstance(ii.dtype, pd.IntervalDtype) obj = index_or_series(ii) @@ -745,7 +745,7 @@ def test_fillna_series_timedelta64(self): 1.1, 1 + 1j, True, - pd.Interval(1, 2, inclusive="left"), + pd.Interval(1, 2, closed="left"), pd.Timestamp("2012-01-01", tz="US/Eastern"), pd.Timestamp("2012-01-01"), pd.Timedelta(days=1), diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index a7c03c672be58..b1eaf43f0b368 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1539,12 +1539,12 @@ def test_loc_getitem_interval_index(self): def test_loc_getitem_interval_index2(self): # GH#19977 - index = pd.interval_range(start=0, periods=3, inclusive="both") + index = pd.interval_range(start=0, periods=3, closed="both") df = DataFrame( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 
index=index, columns=["A", "B", "C"] ) - index_exp = pd.interval_range(start=0, periods=2, freq=1, inclusive="both") + index_exp = pd.interval_range(start=0, periods=2, freq=1, closed="both") expected = Series([1, 4], index=index_exp, name="A") result = df.loc[1, "A"] tm.assert_series_equal(result, expected) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 2f3b569c899e1..3c90eee5be999 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1271,7 +1271,7 @@ def test_interval_can_hold_element(self, dtype, element): # Careful: to get the expected Series-inplace behavior we need # `elem` to not have the same length as `arr` - ii2 = IntervalIndex.from_breaks(arr[:-1], inclusive="neither") + ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither") elem = element(ii2) self.check_series_setitem(elem, ii, False) assert not blk._can_hold_element(elem) diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index ba6366b71d854..f9836810afbe0 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -281,10 +281,7 @@ def test_multiindex_interval_datetimes(self, ext): [ range(4), pd.interval_range( - start=pd.Timestamp("2020-01-01"), - periods=4, - freq="6M", - inclusive="right", + start=pd.Timestamp("2020-01-01"), periods=4, freq="6M" ), ] ) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 7dd9b78bab1cd..93da7f71f51f9 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -607,7 +607,7 @@ def test_bar_plt_xaxis_intervalrange(self): expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")] s = Series( [1, 2], - index=[interval_range(0, 2, inclusive="both")], + index=[interval_range(0, 2, closed="both")], ) _check_plot_works(s.plot.bar) assert all( diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index 7e4371100b5ad..0b1d1c4a3d346 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -172,7 +172,7 @@ def test_append_preserve_index_name(self): Index(list("abc")), pd.CategoricalIndex("A B C".split()), pd.CategoricalIndex("D E F".split(), ordered=True), - pd.IntervalIndex.from_breaks([7, 8, 9, 10], inclusive="right"), + pd.IntervalIndex.from_breaks([7, 8, 9, 10]), pd.DatetimeIndex( [ dt.datetime(2013, 1, 3, 0, 0), diff --git a/pandas/tests/reshape/test_cut.py b/pandas/tests/reshape/test_cut.py index 815890f319396..1425686f027e4 100644 --- a/pandas/tests/reshape/test_cut.py +++ b/pandas/tests/reshape/test_cut.py @@ -37,7 +37,7 @@ def test_bins(func): data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1]) result, bins = cut(data, 3, retbins=True) - intervals = IntervalIndex.from_breaks(bins.round(3), "right") + intervals = IntervalIndex.from_breaks(bins.round(3)) intervals = intervals.take([0, 0, 0, 1, 2, 0]) expected = Categorical(intervals, ordered=True) @@ -49,7 +49,7 @@ def test_right(): data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=True, retbins=True) - intervals = IntervalIndex.from_breaks(bins.round(3), "right") + intervals = IntervalIndex.from_breaks(bins.round(3)) expected = Categorical(intervals, ordered=True) expected = expected.take([0, 0, 0, 2, 3, 0, 0]) @@ -61,7 +61,7 @@ def test_no_right(): data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) result, bins = cut(data, 4, right=False, retbins=True) 
- intervals = IntervalIndex.from_breaks(bins.round(3), inclusive="left") + intervals = IntervalIndex.from_breaks(bins.round(3), closed="left") intervals = intervals.take([0, 0, 0, 2, 3, 0, 1]) expected = Categorical(intervals, ordered=True) @@ -86,7 +86,7 @@ def test_bins_from_interval_index_doc_example(): # Make sure we preserve the bins. ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) c = cut(ages, bins=[0, 18, 35, 70]) - expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)], "right") + expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) tm.assert_index_equal(c.categories, expected) result = cut([25, 20, 50], bins=c.categories) @@ -121,8 +121,7 @@ def test_bins_not_monotonic(): [ (Timestamp.min, Timestamp("2018-01-01")), (Timestamp("2018-01-01"), Timestamp.max), - ], - "right", + ] ), ), ( @@ -131,7 +130,7 @@ def test_bins_not_monotonic(): [np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64" ), IntervalIndex.from_tuples( - [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)], "right" + [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)] ), ), ( @@ -157,8 +156,7 @@ def test_bins_not_monotonic(): np.timedelta64(0, "ns"), np.timedelta64(np.iinfo(np.int64).max, "ns"), ), - ], - "right", + ] ), ), ], @@ -234,7 +232,7 @@ def test_labels(right, breaks, closed): arr = np.tile(np.arange(0, 1.01, 0.1), 4) result, bins = cut(arr, 4, retbins=True, right=right) - ex_levels = IntervalIndex.from_breaks(breaks, inclusive=closed) + ex_levels = IntervalIndex.from_breaks(breaks, closed=closed) tm.assert_index_equal(result.categories, ex_levels) @@ -250,7 +248,7 @@ def test_label_precision(): arr = np.arange(0, 0.73, 0.01) result = cut(arr, 4, precision=2) - ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72], "right") + ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72]) tm.assert_index_equal(result.categories, ex_levels) @@ -274,13 +272,13 @@ def test_inf_handling(): result = cut(data, bins) result_ser = cut(data_ser, bins) - ex_uniques = IntervalIndex.from_breaks(bins, "right") + ex_uniques = IntervalIndex.from_breaks(bins) tm.assert_index_equal(result.categories, ex_uniques) - assert result[5] == Interval(4, np.inf, "right") - assert result[0] == Interval(-np.inf, 2, "right") - assert result_ser[5] == Interval(4, np.inf, "right") - assert result_ser[0] == Interval(-np.inf, 2, "right") + assert result[5] == Interval(4, np.inf) + assert result[0] == Interval(-np.inf, 2) + assert result_ser[5] == Interval(4, np.inf) + assert result_ser[0] == Interval(-np.inf, 2) def test_cut_out_of_bounds(): @@ -357,7 +355,7 @@ def test_cut_return_intervals(): exp_bins[0] -= 0.008 expected = Series( - IntervalIndex.from_breaks(exp_bins, inclusive="right").take( + IntervalIndex.from_breaks(exp_bins, closed="right").take( [0, 0, 0, 1, 1, 1, 2, 2, 2] ) ).astype(CDT(ordered=True)) @@ -370,7 +368,7 @@ def test_series_ret_bins(): result, bins = cut(ser, 2, retbins=True) expected = Series( - IntervalIndex.from_breaks([-0.003, 1.5, 3], inclusive="right").repeat(2) + IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) @@ -444,8 +442,7 @@ def test_datetime_bin(conv): [ Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), - ], - "right", + ] ) ).astype(CDT(ordered=True)) @@ -491,8 +488,7 @@ def test_datetime_cut(data): Interval( Timestamp("2013-01-02 08:00:00"), Timestamp("2013-01-03 
00:00:00") ), - ], - "right", + ] ) ).astype(CDT(ordered=True)) tm.assert_series_equal(Series(result), expected) @@ -535,8 +531,7 @@ def test_datetime_tz_cut(bins, box): Timestamp("2013-01-02 08:00:00", tz=tz), Timestamp("2013-01-03 00:00:00", tz=tz), ), - ], - "right", + ] ) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) @@ -690,8 +685,8 @@ def test_cut_no_warnings(): def test_cut_with_duplicated_index_lowest_included(): # GH 42185 expected = Series( - [Interval(-0.001, 2, inclusive="right")] * 3 - + [Interval(2, 4, inclusive="right"), Interval(-0.001, 2, inclusive="right")], + [Interval(-0.001, 2, closed="right")] * 3 + + [Interval(2, 4, closed="right"), Interval(-0.001, 2, closed="right")], index=[0, 1, 2, 3, 0], dtype="category", ).cat.as_ordered() @@ -711,16 +706,16 @@ def test_cut_with_nonexact_categorical_indices(): index = pd.CategoricalIndex( [ - Interval(-0.099, 9.9, inclusive="right"), - Interval(9.9, 19.8, inclusive="right"), - Interval(19.8, 29.7, inclusive="right"), - Interval(29.7, 39.6, inclusive="right"), - Interval(39.6, 49.5, inclusive="right"), - Interval(49.5, 59.4, inclusive="right"), - Interval(59.4, 69.3, inclusive="right"), - Interval(69.3, 79.2, inclusive="right"), - Interval(79.2, 89.1, inclusive="right"), - Interval(89.1, 99, inclusive="right"), + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), ], ordered=True, ) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index e2eed4358aaaa..0322ed161c83c 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -301,7 +301,7 @@ def test_pivot_with_interval_index(self, interval_values, dropna): def test_pivot_with_interval_index_margins(self): # GH 25815 - ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2], "right") + ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]) df = DataFrame( { "A": np.arange(4, 0, -1, dtype=np.intp), @@ -319,10 +319,7 @@ def test_pivot_with_interval_index_margins(self): result = pivot_tab["All"] expected = Series( [3, 7, 10], - index=Index( - [pd.Interval(0, 1, "right"), pd.Interval(1, 2, "right"), "All"], - name="C", - ), + index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"), name="All", dtype=np.intp, ) diff --git a/pandas/tests/reshape/test_qcut.py b/pandas/tests/reshape/test_qcut.py index 0f82bb736c069..f7c7204d02a49 100644 --- a/pandas/tests/reshape/test_qcut.py +++ b/pandas/tests/reshape/test_qcut.py @@ -76,8 +76,7 @@ def test_qcut_include_lowest(): Interval(2.25, 4.5), Interval(4.5, 6.75), Interval(6.75, 9), - ], - "right", + ] ) tm.assert_index_equal(ii.categories, ex_levels) @@ -92,7 +91,7 @@ def test_qcut_nas(): def test_qcut_index(): result = qcut([0, 2], 2) - intervals = [Interval(-0.001, 1, "right"), Interval(1, 2, "right")] + intervals = [Interval(-0.001, 1), Interval(1, 2)] expected = Categorical(intervals, ordered=True) tm.assert_categorical_equal(result, expected) @@ -128,11 +127,7 @@ def test_qcut_return_intervals(): res = qcut(ser, [0, 0.333, 0.666, 1]) exp_levels = np.array( - [ - Interval(-0.001, 2.664, "right"), - Interval(2.664, 5.328, "right"), - Interval(5.328, 8, "right"), - ] + 
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)] ) exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True)) tm.assert_series_equal(res, exp) @@ -188,7 +183,7 @@ def test_qcut_duplicates_bin(kwargs, msg): qcut(values, 3, **kwargs) else: result = qcut(values, 3, **kwargs) - expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)], "right") + expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)]) tm.assert_index_equal(result.categories, expected) @@ -203,7 +198,7 @@ def test_single_quantile(data, start, end, length, labels): result = qcut(ser, 1, labels=labels) if labels is None: - intervals = IntervalIndex([Interval(start, end)] * length, inclusive="right") + intervals = IntervalIndex([Interval(start, end)] * length, closed="right") expected = Series(intervals).astype(CDT(ordered=True)) else: expected = Series([0] * length, dtype=np.intp) @@ -222,7 +217,7 @@ def test_single_quantile(data, start, end, length, labels): def test_qcut_nat(ser): # see gh-19768 intervals = IntervalIndex.from_tuples( - [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])], "right" + [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])] ) expected = Series(Categorical(intervals, ordered=True)) @@ -252,8 +247,7 @@ def test_datetime_tz_qcut(bins): Timestamp("2013-01-02 08:00:00", tz=tz), Timestamp("2013-01-03 00:00:00", tz=tz), ), - ], - "right", + ] ) ).astype(CDT(ordered=True)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py index c5644b2f36ead..192aaacbac2b5 100644 --- a/pandas/tests/scalar/interval/test_interval.py +++ b/pandas/tests/scalar/interval/test_interval.py @@ -13,22 +13,22 @@ @pytest.fixture def interval(): - return Interval(0, 1, "right") + return Interval(0, 1) class TestInterval: def test_properties(self, interval): - assert interval.inclusive == "right" + assert interval.closed == "right" assert interval.left == 0 assert interval.right == 1 assert interval.mid == 0.5 def test_repr(self, interval): - assert repr(interval) == "Interval(0, 1, inclusive='right')" + assert repr(interval) == "Interval(0, 1, closed='right')" assert str(interval) == "(0, 1]" - interval_left = Interval(0, 1, "left") - assert repr(interval_left) == "Interval(0, 1, inclusive='left')" + interval_left = Interval(0, 1, closed="left") + assert repr(interval_left) == "Interval(0, 1, closed='left')" assert str(interval_left) == "[0, 1)" def test_contains(self, interval): @@ -40,14 +40,14 @@ def test_contains(self, interval): assert 0 in interval_both assert 1 in interval_both - interval_neither = Interval(0, 1, "neither") + interval_neither = Interval(0, 1, closed="neither") assert 0 not in interval_neither assert 0.5 in interval_neither assert 1 not in interval_neither def test_equal(self): - assert Interval(0, 1, "right") == Interval(0, 1, "right") - assert Interval(0, 1, "right") != Interval(0, 1, "left") + assert Interval(0, 1) == Interval(0, 1, closed="right") + assert Interval(0, 1) != Interval(0, 1, closed="left") assert Interval(0, 1) != 0 def test_comparison(self): @@ -125,7 +125,7 @@ def test_is_empty(self, left, right, closed): iv = Interval(left, right, closed) assert iv.is_empty is False - # same endpoint is empty except when inclusive='both' (contains one point) + # same endpoint is empty except when closed='both' (contains one point) iv = Interval(left, left, closed) result = iv.is_empty expected = closed != "both" @@ -148,8 
+148,8 @@ def test_construct_errors(self, left, right): Interval(left, right) def test_math_add(self, closed): - interval = Interval(0, 1, closed) - expected = Interval(1, 2, closed) + interval = Interval(0, 1, closed=closed) + expected = Interval(1, 2, closed=closed) result = interval + 1 assert result == expected @@ -169,8 +169,8 @@ def test_math_add(self, closed): interval + "foo" def test_math_sub(self, closed): - interval = Interval(0, 1, closed) - expected = Interval(-1, 0, closed) + interval = Interval(0, 1, closed=closed) + expected = Interval(-1, 0, closed=closed) result = interval - 1 assert result == expected @@ -187,8 +187,8 @@ def test_math_sub(self, closed): interval - "foo" def test_math_mult(self, closed): - interval = Interval(0, 1, closed) - expected = Interval(0, 2, closed) + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 2, closed=closed) result = interval * 2 assert result == expected @@ -209,8 +209,8 @@ def test_math_mult(self, closed): interval * "foo" def test_math_div(self, closed): - interval = Interval(0, 1, closed) - expected = Interval(0, 0.5, closed) + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 0.5, closed=closed) result = interval / 2.0 assert result == expected @@ -227,8 +227,8 @@ def test_math_div(self, closed): interval / "foo" def test_math_floordiv(self, closed): - interval = Interval(1, 2, closed) - expected = Interval(0, 1, closed) + interval = Interval(1, 2, closed=closed) + expected = Interval(0, 1, closed=closed) result = interval // 2 assert result == expected @@ -245,9 +245,9 @@ def test_math_floordiv(self, closed): interval // "foo" def test_constructor_errors(self): - msg = "invalid option for 'inclusive': foo" + msg = "invalid option for 'closed': foo" with pytest.raises(ValueError, match=msg): - Interval(0, 1, "foo") + Interval(0, 1, closed="foo") msg = "left side of interval must be <= right side" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 244ad9884b82a..90051405c6935 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -807,14 +807,9 @@ def test_index_putmask(self, obj, key, expected, val): pytest.param( # GH#45568 setting a valid NA value into IntervalDtype[int] should # cast to IntervalDtype[float] - Series(interval_range(1, 5, inclusive="right")), + Series(interval_range(1, 5)), Series( - [ - Interval(1, 2, "right"), - np.nan, - Interval(3, 4, "right"), - Interval(4, 5, "right"), - ], + [Interval(1, 2), np.nan, Interval(3, 4), Interval(4, 5)], dtype="interval[float64]", ), 1, @@ -1085,9 +1080,9 @@ class TestSetitemFloatIntervalWithIntIntervalValues(SetitemCastingEquivalents): def test_setitem_example(self): # Just a case here to make obvious what this test class is aimed at - idx = IntervalIndex.from_breaks(range(4), inclusive="right") + idx = IntervalIndex.from_breaks(range(4)) obj = Series(idx) - val = Interval(0.5, 1.5, "right") + val = Interval(0.5, 1.5) obj[0] = val assert obj.dtype == "Interval[float64, right]" @@ -1381,7 +1376,7 @@ def obj(self): @pytest.mark.parametrize( - "val", ["foo", Period("2016", freq="Y"), Interval(1, 2, inclusive="both")] + "val", ["foo", Period("2016", freq="Y"), Interval(1, 2, closed="both")] ) @pytest.mark.parametrize("exp_dtype", [object]) class TestPeriodIntervalCoercion(CoercionTest): @@ -1389,7 +1384,7 @@ class TestPeriodIntervalCoercion(CoercionTest): @pytest.fixture( params=[ 
period_range("2016-01-01", periods=3, freq="D"), - interval_range(1, 5, inclusive="right"), + interval_range(1, 5), ] ) def obj(self, request): @@ -1580,7 +1575,7 @@ def test_setitem_int_as_positional_fallback_deprecation(): # Once the deprecation is enforced, we will have # expected = Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.0, 4.1, 5.0]) - ii = IntervalIndex.from_breaks(range(10), inclusive="right")[::2] + ii = IntervalIndex.from_breaks(range(10))[::2] ser2 = Series(range(len(ii)), index=ii) expected2 = ser2.copy() expected2.iloc[-1] = 9 diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 01fe6a529a86f..f79714ae6455c 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1182,7 +1182,7 @@ def test_constructor_datetime64_bigendian(self): @pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray]) def test_construction_interval(self, interval_constructor): # construction from interval & array of intervals - intervals = interval_constructor.from_breaks(np.arange(3), inclusive="right") + intervals = interval_constructor.from_breaks(np.arange(3), closed="right") result = Series(intervals) assert result.dtype == "interval[int64, right]" tm.assert_index_equal(Index(result.values), Index(intervals)) @@ -1192,7 +1192,7 @@ def test_construction_interval(self, interval_constructor): ) def test_constructor_infer_interval(self, data_constructor): # GH 23563: consistent closed results in interval dtype - data = [Interval(0, 1, "right"), Interval(0, 2, "right"), None] + data = [Interval(0, 1), Interval(0, 2), None] result = Series(data_constructor(data)) expected = Series(IntervalArray(data)) assert result.dtype == "interval[float64, right]" @@ -1201,9 +1201,9 @@ def test_constructor_infer_interval(self, data_constructor): @pytest.mark.parametrize( "data_constructor", [list, np.array], ids=["list", "ndarray[object]"] ) - def test_constructor_interval_mixed_inclusive(self, data_constructor): - # GH 23563: mixed inclusive results in object dtype (not interval dtype) - data = [Interval(0, 1, inclusive="both"), Interval(0, 2, inclusive="neither")] + def test_constructor_interval_mixed_closed(self, data_constructor): + # GH 23563: mixed closed results in object dtype (not interval dtype) + data = [Interval(0, 1, closed="both"), Interval(0, 2, closed="neither")] result = Series(data_constructor(data)) assert result.dtype == object assert result.tolist() == data diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index def63c552e059..c1144e54cb945 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1127,26 +1127,19 @@ def test_value_counts(self): # assert isinstance(factor, n) result = algos.value_counts(factor) breaks = [-1.194, -0.535, 0.121, 0.777, 1.433] - index = IntervalIndex.from_breaks(breaks, inclusive="right").astype( - CDT(ordered=True) - ) + index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True)) expected = Series([1, 1, 1, 1], index=index) tm.assert_series_equal(result.sort_index(), expected.sort_index()) def test_value_counts_bins(self): s = [1, 2, 3, 4] result = algos.value_counts(s, bins=1) - expected = Series( - [4], index=IntervalIndex.from_tuples([(0.996, 4.0)], inclusive="right") - ) + expected = Series([4], index=IntervalIndex.from_tuples([(0.996, 4.0)])) tm.assert_series_equal(result, expected) result = algos.value_counts(s, bins=2, sort=False) expected = Series( - [2, 2], - index=IntervalIndex.from_tuples( - 
[(0.996, 2.5), (2.5, 4.0)], inclusive="right" - ), + [2, 2], index=IntervalIndex.from_tuples([(0.996, 2.5), (2.5, 4.0)]) ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index c3c5f2fdc9d29..6ff1a1c17b179 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -247,7 +247,7 @@ def test_assert_frame_equal_extension_dtype_mismatch(): def test_assert_frame_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 - left = DataFrame({"a": [pd.Interval(0, 1, "right")]}, dtype="interval") + left = DataFrame({"a": [pd.Interval(0, 1)]}, dtype="interval") right = left.astype(object) msg = ( diff --git a/pandas/tests/util/test_assert_interval_array_equal.py b/pandas/tests/util/test_assert_interval_array_equal.py index 29ebc00b2e69a..8cc4ade3d7e95 100644 --- a/pandas/tests/util/test_assert_interval_array_equal.py +++ b/pandas/tests/util/test_assert_interval_array_equal.py @@ -9,7 +9,7 @@ [ {"start": 0, "periods": 4}, {"start": 1, "periods": 5}, - {"start": 5, "end": 10, "inclusive": "left"}, + {"start": 5, "end": 10, "closed": "left"}, ], ) def test_interval_array_equal(kwargs): @@ -19,13 +19,13 @@ def test_interval_array_equal(kwargs): def test_interval_array_equal_closed_mismatch(): kwargs = {"start": 0, "periods": 5} - arr1 = interval_range(inclusive="left", **kwargs).values - arr2 = interval_range(inclusive="right", **kwargs).values + arr1 = interval_range(closed="left", **kwargs).values + arr2 = interval_range(closed="right", **kwargs).values msg = """\ IntervalArray are different -Attribute "inclusive" are different +Attribute "closed" are different \\[left\\]: left \\[right\\]: right""" diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index f8600956aa2f9..25f5b31eb4664 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -290,7 +290,7 @@ def test_assert_series_equal_extension_dtype_mismatch(): def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 - left = Series([pd.Interval(0, 1, "right")], dtype="interval") + left = Series([pd.Interval(0, 1)], dtype="interval") right = left.astype(object) msg = """Attributes of Series are different diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index 7e938e4648e97..2b5be2d48f382 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -16,7 +16,6 @@ import numpy as np -from pandas._typing import IntervalInclusiveType from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( @@ -494,7 +493,7 @@ def validate_endpoints(closed: str | None) -> tuple[bool, bool]: return left_closed, right_closed -def validate_inclusive(inclusive: IntervalInclusiveType | None) -> tuple[bool, bool]: +def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: """ Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.
xref https://github.com/pandas-dev/pandas/issues/47365#issuecomment-1215904502 Reverts these PRs: https://github.com/pandas-dev/pandas/pull/47775 https://github.com/pandas-dev/pandas/pull/47655 https://github.com/pandas-dev/pandas/pull/47646 https://github.com/pandas-dev/pandas/pull/47636 https://github.com/pandas-dev/pandas/pull/47637 https://github.com/pandas-dev/pandas/pull/47367 https://github.com/pandas-dev/pandas/pull/46522 https://github.com/pandas-dev/pandas/pull/46355 @phofl would appreciate a look, since you did a lot of the cleanup post-deprecation (and there were tons of merge conflicts while reverting along the way).
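For reviewers skimming the diff: the net effect of the revert is to restore the long-standing `closed` keyword on the interval constructors in place of the short-lived `inclusive` spelling. A minimal sketch of the restored API (toy values, not taken from the test suite):

```python
import pandas as pd

# 'closed' controls which endpoints belong to each interval
iv = pd.Interval(0, 1, closed="left")  # [0, 1)
assert 0 in iv and 1 not in iv

# the same keyword is restored on the interval constructors
idx = pd.IntervalIndex.from_breaks([0, 1, 2], closed="both")
rng = pd.interval_range(start=0, end=5, closed="neither")
```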
https://api.github.com/repos/pandas-dev/pandas/pulls/48116
2022-08-17T00:42:41Z
2022-08-18T15:41:46Z
2022-08-18T15:41:46Z
2022-08-18T15:41:50Z
DEPR: inplace kwarg in set_index
diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index f939945fc6cda..92729a16c6a30 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1723,13 +1723,12 @@ the given columns to a MultiIndex: frame Other options in ``set_index`` allow you not drop the index columns or to add -the index in-place (without creating a new object): +the index without creating a copy of the underlying data: .. ipython:: python data.set_index('c', drop=False) - data.set_index(['a', 'b'], inplace=True) - data + data.set_index(['a', 'b'], copy=False) Reset the index ~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0ceac8aeb9db8..64f3365d89627 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -849,6 +849,7 @@ Other Deprecations - Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`) - Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) - Fixed up warning message of deprecation of :meth:`MultiIndex.lesort_depth` as public method, as the message previously referred to :meth:`MultiIndex.is_lexsorted` instead (:issue:`38701`) +- Deprecated the ``inplace`` keyword in :meth:`DataFrame.set_index`, use ``df = df.set_index(..., copy=False)`` instead (:issue:`48115`) - Deprecated the ``sort_columns`` argument in :meth:`DataFrame.plot` and :meth:`Series.plot` (:issue:`47563`). .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7a4f41da5840c..d32b8ced3cda6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5818,7 +5818,7 @@ def set_index( *, drop: bool = ..., append: bool = ..., - inplace: Literal[False] = ..., + inplace: Literal[False] | lib.NoDefault = ..., verify_integrity: bool = ..., copy: bool | lib.NoDefault = ..., ) -> DataFrame: @@ -5843,7 +5843,7 @@ def set_index( keys, drop: bool = True, append: bool = False, - inplace: bool = False, + inplace: bool | lib.NoDefault = lib.no_default, verify_integrity: bool = False, copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame | None: @@ -5868,6 +5868,9 @@ def set_index( Whether to append columns to existing index. inplace : bool, default False Whether to modify the DataFrame rather than creating a new one. + + .. deprecated:: 1.5.0 + verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this @@ -5941,7 +5944,18 @@ def set_index( 3 9 7 2013 84 4 16 10 2014 31 """ - inplace = validate_bool_kwarg(inplace, "inplace") + if inplace is not lib.no_default: + inplace = validate_bool_kwarg(inplace, "inplace") + warnings.warn( + "The 'inplace' keyword in DataFrame.set_index is deprecated " + "and will be removed in a future version. 
Use " + "`df = df.set_index(..., copy=False)` instead.", + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) + else: + inplace = False + if inplace: if copy is not lib.no_default: raise ValueError("Cannot specify copy when inplace=True") diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 50aaac211c7a5..6ad69c8fa0e8d 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -782,9 +782,9 @@ def get_result(self, copy: bool = True) -> DataFrame: if self.indicator: result = self._indicator_post_merge(result) - self._maybe_add_join_keys(result, left_indexer, right_indexer) + result = self._maybe_add_join_keys(result, left_indexer, right_indexer) - self._maybe_restore_index_levels(result) + result = self._maybe_restore_index_levels(result) self._maybe_drop_cross_column(result, self._cross) @@ -851,7 +851,7 @@ def _indicator_post_merge(self, result: DataFrame) -> DataFrame: result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) return result - def _maybe_restore_index_levels(self, result: DataFrame) -> None: + def _maybe_restore_index_levels(self, result: DataFrame) -> DataFrame: """ Restore index levels specified as `on` parameters @@ -869,7 +869,7 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None: Returns ------- - None + DataFrame """ names_to_restore = [] for name, left_key, right_key in zip( @@ -893,14 +893,15 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None: names_to_restore.append(name) if names_to_restore: - result.set_index(names_to_restore, inplace=True) + result = result.set_index(names_to_restore, copy=False) + return result def _maybe_add_join_keys( self, result: DataFrame, left_indexer: np.ndarray | None, right_indexer: np.ndarray | None, - ) -> None: + ) -> DataFrame: left_has_missing = None right_has_missing = None @@ -996,11 +997,12 @@ def _maybe_add_join_keys( for level_name in result.index.names ] - result.set_index(idx_list, inplace=True) + result = result.set_index(idx_list, copy=False) else: result.index = Index(key_col, name=name) else: result.insert(i, name or f"key_{i}", key_col) + return result def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """return the join indexers""" @@ -1768,7 +1770,8 @@ def get_result(self, copy: bool = True) -> DataFrame: result = self._reindex_and_concat( join_index, left_join_indexer, right_join_indexer, copy=copy ) - self._maybe_add_join_keys(result, left_indexer, right_indexer) + + result = self._maybe_add_join_keys(result, left_indexer, right_indexer) return result diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py index 2305c209936b6..bb62b1405da3a 100644 --- a/pandas/io/parsers/arrow_parser_wrapper.py +++ b/pandas/io/parsers/arrow_parser_wrapper.py @@ -117,7 +117,7 @@ def _finalize_output(self, frame: DataFrame) -> DataFrame: # String case if item not in frame.columns: raise ValueError(f"Index {item} invalid") - frame.set_index(self.index_col, drop=True, inplace=True) + frame = frame.set_index(self.index_col, drop=True, copy=False) # Clear names if headerless and no name given if self.header is None and not multi_index_named: frame.index.names = [None] * len(frame.index.names) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f7d5fb9270247..a857972c576e0 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4663,7 +4663,7 @@ def read( columns.insert(0, n) s = super().read(where=where, 
columns=columns, start=start, stop=stop) if is_multi_index: - s.set_index(self.levels, inplace=True) + s = s.set_index(self.levels, copy=False) s = s.iloc[:, 0] diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 2b835a1e7ebed..6c4397ff52b59 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -152,7 +152,7 @@ def _wrap_result( frame = _parse_date_columns(frame, parse_dates) if index_col is not None: - frame.set_index(index_col, inplace=True) + frame = frame.set_index(index_col, copy=False) return frame @@ -979,7 +979,7 @@ def _query_iterator( self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: - self.frame.set_index(self.index, inplace=True) + self.frame = self.frame.set_index(self.index, copy=False) yield self.frame @@ -1020,7 +1020,7 @@ def read( self._harmonize_columns(parse_dates=parse_dates) if self.index is not None: - self.frame.set_index(self.index, inplace=True) + self.frame = self.frame.set_index(self.index, copy=False) return self.frame diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py index 783bef3206d58..6bfe07feb010d 100644 --- a/pandas/tests/frame/methods/test_combine_first.py +++ b/pandas/tests/frame/methods/test_combine_first.py @@ -387,8 +387,8 @@ def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype ) df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype) - df.set_index(["a", "b"], inplace=True) - df2.set_index(["a", "b"], inplace=True) + df = df.set_index(["a", "b"], copy=False) + df2 = df2.set_index(["a", "b"], copy=False) result = df.combine_first(df2) expected = DataFrame( {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 9392d3c146942..b404c34a4ddb8 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -40,9 +40,11 @@ def test_set_index_copy(self): msg = "Cannot specify copy when inplace=True" with pytest.raises(ValueError, match=msg): - df.set_index("A", inplace=True, copy=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace'"): + df.set_index("A", inplace=True, copy=True) with pytest.raises(ValueError, match=msg): - df.set_index("A", inplace=True, copy=False) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace'"): + df.set_index("A", inplace=True, copy=False) def test_set_index_multiindex(self): # segfault in GH#3308 @@ -197,7 +199,10 @@ def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys): if inplace: result = df.copy() - return_value = result.set_index(keys, drop=drop, inplace=True) + with tm.assert_produces_warning( + FutureWarning, match="The 'inplace' keyword" + ): + return_value = result.set_index(keys, drop=drop, inplace=True) assert return_value is None else: result = df.set_index(keys, drop=drop) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 09ea9ae04320b..af092d433a846 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -244,7 +244,8 @@ def _check_f(base, f): # set_index f = lambda x: x.set_index("a", inplace=True) - _check_f(data.copy(), f) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + _check_f(data.copy(), f) # reset_index f = lambda x: x.reset_index(inplace=True) diff --git 
a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 35335c54cd41e..aedc9270fd37c 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -436,7 +436,8 @@ def test_date_index_query(self): df = DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] @@ -449,7 +450,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] @@ -463,7 +465,8 @@ def test_date_index_query_with_NaT_duplicates(self): d["dates3"] = date_range("1/1/2014", periods=n) df = DataFrame(d) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)] @@ -794,7 +797,8 @@ def test_date_index_query(self): df = DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser @@ -809,7 +813,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser @@ -824,7 +829,8 @@ def test_date_index_query_with_NaT_duplicates(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - return_value = df.set_index("dates1", inplace=True, drop=True) + with tm.assert_produces_warning(FutureWarning, match="The 'inplace' keyword"): + return_value = df.set_index("dates1", inplace=True, drop=True) assert return_value is None msg = r"'BoolOp' nodes are not implemented" with pytest.raises(NotImplementedError, match=msg): diff --git a/pandas/tests/groupby/test_apply.py 
b/pandas/tests/groupby/test_apply.py index b064c12f89c21..5a66d13efce65 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -678,7 +678,7 @@ def test_apply_groupby_datetimeindex(): result = df.groupby("Name").sum() expected = DataFrame({"Name": ["A", "B", "C"], "Value": [10, 50, 90]}) - expected.set_index("Name", inplace=True) + expected = expected.set_index("Name", copy=False) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index a2461c68f50a6..4b9f5deb40849 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -98,7 +98,7 @@ def test_builtins_apply(keys, f): if f != sum: expected = gb.agg(fname).reset_index() - expected.set_index(keys, inplace=True, drop=False) + expected = expected.set_index(keys, copy=False, drop=False) tm.assert_frame_equal(result, expected, check_dtype=False) tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)()) @@ -454,7 +454,7 @@ def test_groupby_non_arithmetic_agg_types(dtype, method, data): df_out = DataFrame(exp) df_out["b"] = df_out.b.astype(out_type) - df_out.set_index("a", inplace=True) + df_out = df_out.set_index("a", copy=False) grpd = df.groupby("a") t = getattr(grpd, method)(*data["args"]) diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py index eed27cd450e9c..b1deec12b1adb 100644 --- a/pandas/tests/indexes/multi/test_reshape.py +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -35,7 +35,7 @@ def test_insert(idx): idx.insert(0, ("foo2",)) left = pd.DataFrame([["a", "b", 0], ["b", "d", 1]], columns=["1st", "2nd", "3rd"]) - left.set_index(["1st", "2nd"], inplace=True) + left = left.set_index(["1st", "2nd"], copy=False) ts = left["3rd"].copy(deep=True) left.loc[("b", "x"), "3rd"] = 2 @@ -65,7 +65,7 @@ def test_insert(idx): ], columns=["1st", "2nd", "3rd"], ) - right.set_index(["1st", "2nd"], inplace=True) + right = right.set_index(["1st", "2nd"], copy=False) # FIXME data types changes to float because # of intermediate nan insertion; tm.assert_frame_equal(left, right, check_dtype=False) diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py index e8c766d489813..16b0ae2c63eb1 100644 --- a/pandas/tests/indexing/multiindex/test_indexing_slow.py +++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -60,15 +60,18 @@ def validate(mi, df, key): assert key[: i + 1] in mi.index right = df[mask].copy() + msg = "The 'inplace' keyword in DataFrame.set_index is deprecated" if i + 1 != len(key): # partial key return_value = right.drop(cols[: i + 1], axis=1, inplace=True) assert return_value is None - return_value = right.set_index(cols[i + 1 : -1], inplace=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = right.set_index(cols[i + 1 : -1], inplace=True) assert return_value is None tm.assert_frame_equal(mi.loc[key[: i + 1]], right) else: # full key - return_value = right.set_index(cols[:-1], inplace=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = right.set_index(cols[:-1], inplace=True) assert return_value is None if len(right) == 1: # single hit right = Series( diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 08e15545cb998..100b3e55b03c5 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ 
b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -131,7 +131,7 @@ def test_multiindex_complex(self): "z": non_complex_data, } ) - result.set_index(["x", "y"], inplace=True) + result = result.set_index(["x", "y"], copy=False) expected = DataFrame( {"z": non_complex_data}, index=MultiIndex.from_arrays( diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index 40a50c55de2a4..6c6ea4c8b0e0a 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -137,7 +137,7 @@ def test_append_series(setup_path): mi["B"] = np.arange(len(mi)) mi["C"] = "foo" mi.loc[3:5, "C"] = "bar" - mi.set_index(["C", "B"], inplace=True) + mi = mi.set_index(["C", "B"], copy=False) s = mi.stack() s.index = s.index.droplevel(2) store.append("mi", s) @@ -326,7 +326,7 @@ def test_append_with_different_block_ordering(setup_path): a = df.pop("A") df["A"] = a - df.set_index("index", inplace=True) + df = df.set_index("index", copy=False) store.append("df", df) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index ee55837324f20..c2c47672b190d 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -771,7 +771,7 @@ def _roundtrip(self, test_frame1): assert self.pandasSQL.to_sql(test_frame1, "test_frame_roundtrip") == 4 result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip") - result.set_index("level_0", inplace=True) + result = result.set_index("level_0", copy=False) # result.index.astype(int) result.index.name = None @@ -928,7 +928,7 @@ def test_roundtrip(self, test_frame1): # HACK! result.index = test_frame1.index - result.set_index("level_0", inplace=True) + result = result.set_index("level_0", copy=False) result.index.astype(int) result.index.name = None tm.assert_frame_equal(result, test_frame1) diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index b38c9adb0a893..f804f7df06bb8 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -1550,8 +1550,8 @@ def test_errorbar_with_partial_columns(self): self._check_has_errorbars(ax, xerr=0, yerr=2) ix = date_range("1/1/2000", periods=10, freq="M") - df.set_index(ix, inplace=True) - df_err.set_index(ix, inplace=True) + df = df.set_index(ix, copy=False) + df_err = df_err.set_index(ix, copy=False) ax = _check_plot_works(df.plot, yerr=df_err, kind="line") self._check_has_errorbars(ax, xerr=0, yerr=2) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index d97c6a3dacdc3..52990b4feda38 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -409,7 +409,7 @@ def test_join_hierarchical_mixed(self): df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"]) new_df = df.groupby(["a"]).agg({"b": [np.mean, np.sum]}) other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"]) - other_df.set_index("a", inplace=True) + other_df = other_df.set_index("a", copy=False) # GH 9455, 12219 msg = "merging between different levels is deprecated" with tm.assert_produces_warning(FutureWarning, match=msg): diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 116fb298df61d..b7365f98edf61 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1347,7 +1347,7 @@ def test_merge_on_index_with_more_values(self, how, index, expected_index): ], columns=["a", "key", 
"b"], ) - expected.set_index(expected_index, inplace=True) + expected = expected.set_index(expected_index, copy=False) tm.assert_frame_equal(result, expected) def test_merge_right_index_right(self): diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index 0dbe45eeb1e82..cc8019c50bc1e 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -130,7 +130,7 @@ def run_asserts(left, right, sort): left["4th"] = bind_cols(left) right["5th"] = -bind_cols(right) - right.set_index(icols, inplace=True) + right = right.set_index(icols, copy=False) run_asserts(left, right, sort) @@ -143,7 +143,7 @@ def run_asserts(left, right, sort): i = np.random.permutation(len(left)) right = left.iloc[i, :-1] right["5th"] = -bind_cols(right) - right.set_index(icols, inplace=True) + right = right.set_index(icols, copy=False) run_asserts(left, right, sort) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 2f4fffe57593f..7a3e18c64f366 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -247,7 +247,9 @@ def test_timedelta_assignment(): def test_underlying_data_conversion(): # GH 4080 df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]}) - return_value = df.set_index(["a", "b", "c"], inplace=True) + msg = "The 'inplace' keyword" + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = df.set_index(["a", "b", "c"], inplace=True) assert return_value is None s = Series([1], index=[(2, 2, 2)]) df["val"] = 0 @@ -257,7 +259,8 @@ def test_underlying_data_conversion(): expected = DataFrame( {"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3], "val": [0, 1, 0]} ) - return_value = expected.set_index(["a", "b", "c"], inplace=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = expected.set_index(["a", "b", "c"], inplace=True) assert return_value is None tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index c9ec2985488be..943ffc10f52c8 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -706,7 +706,7 @@ def test_rolling_window_as_string(center, expected_data): data = npr.randint(1, high=100, size=len(days)) df = DataFrame({"DateCol": days, "metric": data}) - df.set_index("DateCol", inplace=True) + df = df.set_index("DateCol", copy=False) result = df.rolling(window="21D", min_periods=2, closed="left", center=center)[ "metric" ].agg("max")
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
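For readers migrating code, a minimal sketch of the pattern these test updates encode, assuming a pandas build that includes this change (the `copy` keyword on `set_index` is part of the same work):

```python
import pandas as pd

df = pd.DataFrame({"dates1": pd.date_range("1/1/2012", periods=3), "x": [1, 2, 3]})

# Deprecated: mutates df in place and returns None; now emits
# "FutureWarning: The 'inplace' keyword ... is deprecated".
# df.set_index("dates1", inplace=True)

# Replacement: reassign the result; copy=False avoids copying the underlying data.
df = df.set_index("dates1", copy=False)
print(df)
```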
https://api.github.com/repos/pandas-dev/pandas/pulls/48115
2022-08-16T19:26:18Z
2022-08-17T21:19:39Z
2022-08-17T21:19:39Z
2022-08-17T21:43:39Z
Fix spelling error: 'smaller then' -> 'smaller than'
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 6c32cc98df9ac..4434ed5a8b5f7 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -262,7 +262,7 @@ def use_numba_cb(key) -> None: pc_chop_threshold_doc = """ : float or None - if set to a float value, all float values smaller then the given threshold + if set to a float value, all float values smaller than the given threshold will be displayed as exactly 0 by repr and friends. """
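For reference, a small illustration of the option this docstring documents; behavior follows the docstring, though the exact repr formatting may vary by pandas version:

```python
import pandas as pd

pd.set_option("display.chop_threshold", 0.5)
ser = pd.Series([0.1, 0.9])

# Float values smaller than the threshold display as exactly 0;
# the underlying data is unchanged.
print(ser)     # shows 0.0 and 0.9
print(ser[0])  # 0.1 -- only the repr is affected

pd.reset_option("display.chop_threshold")
```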
null
https://api.github.com/repos/pandas-dev/pandas/pulls/48114
2022-08-16T19:02:23Z
2022-08-16T19:03:57Z
2022-08-16T19:03:57Z
2022-08-16T19:04:04Z
DOC: update contributing to pandas documentation
diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index e76197e302ca4..fe4cb005248f3 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -194,30 +194,10 @@ Doing 'git status' again should give something like:: # modified: /relative/path/to/file-you-added.py # -Finally, commit your changes to your local repository with an explanatory message. pandas -uses a convention for commit message prefixes and layout. Here are -some common prefixes along with general guidelines for when to use them: +Finally, commit your changes to your local repository with an explanatory commit +message:: -* ENH: Enhancement, new functionality -* BUG: Bug fix -* DOC: Additions/updates to documentation -* TST: Additions/updates to tests -* BLD: Updates to the build process/scripts -* PERF: Performance improvement -* TYP: Type annotations -* CLN: Code cleanup - -The following defines how a commit message should be structured. Please reference the -relevant GitHub issues in your commit message using GH1234 or #1234. Either style -is fine, but the former is generally preferred: - -* a subject line with ``< 80`` chars. -* One blank line. -* Optionally, a commit message body. - -Now you can commit your changes in your local repository:: - - git commit -m + git commit -m "your commit message goes here" .. _contributing.push-code: @@ -262,16 +242,28 @@ double check your branch changes against the branch it was based on: Finally, make the pull request ------------------------------ -If everything looks good, you are ready to make a pull request. A pull request is how +If everything looks good, you are ready to make a pull request. A pull request is how code from a local repository becomes available to the GitHub community and can be looked -at and eventually merged into the main version. This pull request and its associated +at and eventually merged into the main version. This pull request and its associated changes will eventually be committed to the main branch and available in the next -release. To submit a pull request: +release. To submit a pull request: #. Navigate to your repository on GitHub -#. Click on the ``Pull Request`` button +#. Click on the ``Compare & pull request`` button #. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks okay one last time +#. Write a descriptive title that includes prefixes. pandas uses a convention for title + prefixes. Here are some common ones along with general guidelines for when to use them: + + * ENH: Enhancement, new functionality + * BUG: Bug fix + * DOC: Additions/updates to documentation + * TST: Additions/updates to tests + * BLD: Updates to the build process/scripts + * PERF: Performance improvement + * TYP: Type annotations + * CLN: Code cleanup + #. Write a description of your changes in the ``Preview Discussion`` tab #. Click ``Send Pull Request``.
- [x] closes #48035 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. I removed the part in the [Committing your code](https://pandas.pydata.org/docs/development/contributing.html#id9) section which said that commit messages should include the number of the GitHub issue the commit contributes towards. I updated the pull request template to include cross references so that it is clearer how to reference GitHub issues. I fixed the code snippet `git commit -m`, which caused an error, and updated the documentation to include both commit message styles: the one-line commit message and the more detailed commit message written in an editor. Let me know if you would like to keep both versions or would like further changes. Additionally, I moved the section about the prefixes to the [Finally, make the pull request](https://pandas.pydata.org/docs/development/contributing.html#id12) section of the documentation. Let me know if the prefixes need to be updated.
https://api.github.com/repos/pandas-dev/pandas/pulls/48113
2022-08-16T18:51:04Z
2022-09-01T20:24:37Z
2022-09-01T20:24:37Z
2022-10-13T16:59:59Z
revert Timestamp and Timedelta constructors typing allowing NaTType return
diff --git a/pandas/_libs/tslibs/nattype.pyi b/pandas/_libs/tslibs/nattype.pyi index 0aa80330b15bc..e9ae46cee7aec 100644 --- a/pandas/_libs/tslibs/nattype.pyi +++ b/pandas/_libs/tslibs/nattype.pyi @@ -67,8 +67,17 @@ class NaTType: def round(self) -> NaTType: ... def floor(self) -> NaTType: ... def ceil(self) -> NaTType: ... - def tz_convert(self) -> NaTType: ... - def tz_localize(self) -> NaTType: ... + @property + def tzinfo(self) -> None: ... + @property + def tz(self) -> None: ... + def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ... + def tz_localize( + self, + tz: _tzinfo | str | None, + ambiguous: str = ..., + nonexistent: str = ..., + ) -> NaTType: ... def replace( self, year: int | None = ..., diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index b05b0ba636251..b3dd5b7907cad 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -1203,6 +1203,13 @@ default 'raise' NaT """, ) + @property + def tz(self) -> None: + return None + + @property + def tzinfo(self) -> None: + return None c_NaT = NaTType() # C-visible diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi index d100108e7dd2b..1fb2bf1b45888 100644 --- a/pandas/_libs/tslibs/timedeltas.pyi +++ b/pandas/_libs/tslibs/timedeltas.pyi @@ -82,15 +82,13 @@ class Timedelta(timedelta): max: ClassVar[Timedelta] resolution: ClassVar[Timedelta] value: int # np.int64 - def __new__( + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] cls: type[_S], value=..., unit: str | None = ..., **kwargs: float | np.integer | np.floating, - ) -> _S: ... - # GH 46171 - # While Timedelta can return pd.NaT, having the constructor return - # a Union with NaTType makes things awkward for users of pandas + ) -> _S | NaTType: ... @classmethod def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ... @property diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi index f39d1f44d82c0..e4be7fda43005 100644 --- a/pandas/_libs/tslibs/timestamps.pyi +++ b/pandas/_libs/tslibs/timestamps.pyi @@ -16,6 +16,7 @@ import numpy as np from pandas._libs.tslibs import ( BaseOffset, + NaTType, Period, Tick, Timedelta, @@ -31,7 +32,8 @@ class Timestamp(datetime): resolution: ClassVar[Timedelta] value: int # np.int64 - def __new__( + # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]") + def __new__( # type: ignore[misc] cls: type[_DatetimeT], ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ..., freq: int | None | str | BaseOffset = ..., @@ -48,10 +50,7 @@ class Timestamp(datetime): tzinfo: _tzinfo | None = ..., *, fold: int | None = ..., - ) -> _DatetimeT: ... - # GH 46171 - # While Timestamp can return pd.NaT, having the constructor return - # a Union with NaTType makes things awkward for users of pandas + ) -> _DatetimeT | NaTType: ... def _set_freq(self, freq: BaseOffset | None) -> None: ... 
@classmethod def _from_value_and_reso( diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 1a07c02f4024a..55577af7be9d9 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -190,7 +190,7 @@ def test_nat_iso_format(get_nat): @pytest.mark.parametrize( "klass,expected", [ - (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period", "tz"]), + (Timestamp, ["freqstr", "normalize", "to_julian_date", "to_period"]), ( Timedelta, [ diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json index c482aa32600fb..3f20611f2c46d 100644 --- a/pyright_reportGeneralTypeIssues.json +++ b/pyright_reportGeneralTypeIssues.json @@ -16,6 +16,7 @@ "pandas/util/version", # and all files that currently don't pass "pandas/_testing/__init__.py", + "pandas/_testing/_hypothesis.py", "pandas/core/algorithms.py", "pandas/core/apply.py", "pandas/core/array_algos/take.py", @@ -57,6 +58,7 @@ "pandas/core/indexes/multi.py", "pandas/core/indexes/numeric.py", "pandas/core/indexes/period.py", + "pandas/core/indexes/timedeltas.py", "pandas/core/indexing.py", "pandas/core/internals/api.py", "pandas/core/internals/array_manager.py",
Re discussion at https://github.com/pandas-dev/pandas/pull/46171#issuecomment-1216906549. Makes the `Timestamp` and `Timedelta` constructors' return type a union that includes `NaTType`.
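For context, the runtime behavior that motivates the union return type; the constructor behavior below is standard pandas, while `NaT.tz`/`NaT.tzinfo` are added by this PR:

```python
import pandas as pd

# The constructors can produce the NaT singleton at runtime, which is why
# the stubs annotate __new__ as returning the class or NaTType.
ts = pd.Timestamp("NaT")
td = pd.Timedelta("NaT")
print(ts is pd.NaT)  # True
print(td is pd.NaT)  # True

# With this PR applied, NaT also exposes tz/tzinfo as None, matching the stubs.
print(pd.NaT.tz, pd.NaT.tzinfo)  # None None
```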
https://api.github.com/repos/pandas-dev/pandas/pulls/48112
2022-08-16T17:42:26Z
2022-08-17T00:48:36Z
2022-08-17T00:48:36Z
2023-02-13T20:50:28Z
ENH: Add cumulative methods to EA
diff --git a/doc/source/reference/extensions.rst b/doc/source/reference/extensions.rst index ce8d8d5c2ca10..595b415ff7342 100644 --- a/doc/source/reference/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -32,6 +32,7 @@ objects. .. autosummary:: :toctree: api/ + api.extensions.ExtensionArray._accumulate api.extensions.ExtensionArray._concat_same_type api.extensions.ExtensionArray._formatter api.extensions.ExtensionArray._from_factorized diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index e16ef0857685d..fdaf46e4ca480 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -76,6 +76,7 @@ Other enhancements - Added ``index`` parameter to :meth:`DataFrame.to_dict` (:issue:`46398`) - Added support for extension array dtypes in :func:`merge` (:issue:`44240`) - Added metadata propagation for binary operators on :class:`DataFrame` (:issue:`28283`) +- Added ``cumsum``, ``cumprod``, ``cummin`` and ``cummax`` to the ``ExtensionArray`` interface via ``_accumulate`` (:issue:`28385`) - :class:`.CategoricalConversionWarning`, :class:`.InvalidComparison`, :class:`.InvalidVersion`, :class:`.LossySetitemError`, and :class:`.NoBufferPresent` are now exposed in ``pandas.errors`` (:issue:`27656`) - Fix ``test`` optional_extra by adding missing test package ``pytest-asyncio`` (:issue:`48361`) - :func:`DataFrame.astype` exception message thrown improved to include column name when type conversion is not possible. (:issue:`47571`) diff --git a/pandas/conftest.py b/pandas/conftest.py index 05877ddf6e223..0d6af91d32dea 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1123,6 +1123,17 @@ def all_logical_operators(request): return request.param +_all_numeric_accumulations = ["cumsum", "cumprod", "cummin", "cummax"] + + [email protected](params=_all_numeric_accumulations) +def all_numeric_accumulations(request): + """ + Fixture for numeric accumulation names + """ + return request.param + + # ---------------------------------------------------------------- # Data sets/files # ---------------------------------------------------------------- diff --git a/pandas/core/array_algos/masked_accumulations.py b/pandas/core/array_algos/masked_accumulations.py new file mode 100644 index 0000000000000..07113128e0947 --- /dev/null +++ b/pandas/core/array_algos/masked_accumulations.py @@ -0,0 +1,92 @@ +""" +masked_accumulations.py is for accumulation algorithms using a mask-based approach +for missing values. +""" + +from __future__ import annotations + +from typing import Callable + +import numpy as np + +from pandas._typing import npt + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_float_dtype, + is_integer_dtype, +) + + +def _cum_func( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, +): + """ + Accumulations for 1D masked array. + + We will modify values in place to replace NAs with the appropriate fill value. + + Parameters + ---------- + func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. 
+ """ + dtype_info: np.iinfo | np.finfo + if is_float_dtype(values): + dtype_info = np.finfo(values.dtype.type) + elif is_integer_dtype(values): + dtype_info = np.iinfo(values.dtype.type) + elif is_bool_dtype(values): + # Max value of bool is 1, but since we are setting into a boolean + # array, 255 is fine as well. Min value has to be 0 when setting + # into the boolean array. + dtype_info = np.iinfo(np.uint8) + else: + raise NotImplementedError( + f"No masked accumulation defined for dtype {values.dtype.type}" + ) + try: + fill_value = { + np.cumprod: 1, + np.maximum.accumulate: dtype_info.min, + np.cumsum: 0, + np.minimum.accumulate: dtype_info.max, + }[func] + except KeyError: + raise NotImplementedError( + f"No accumulation for {func} implemented on BaseMaskedArray" + ) + + values[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + values = func(values) + return values, mask + + +def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumsum, values, mask, skipna=skipna) + + +def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumprod, values, mask, skipna=skipna) + + +def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) + + +def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index f11d031b2f622..c36728391ba21 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -133,6 +133,7 @@ class ExtensionArray: tolist unique view + _accumulate _concat_same_type _formatter _from_factorized @@ -182,8 +183,9 @@ class ExtensionArray: as they only compose abstract methods. Still, a more efficient implementation may be available, and these methods can be overridden. - One can implement methods to handle array reductions. + One can implement methods to handle array accumulations or reductions. + * _accumulate * _reduce One can implement methods to handle parsing from strings that will be used @@ -1368,6 +1370,38 @@ def _concat_same_type( def _can_hold_na(self) -> bool: return self.dtype._can_hold_na + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> ExtensionArray: + """ + Return an ExtensionArray performing an accumulation operation. + + The underlying data type might change. + + Parameters + ---------- + name : str + Name of the function, supported values are: + - cummin + - cummax + - cumsum + - cumprod + skipna : bool, default True + If True, skip NA values. + **kwargs + Additional keyword arguments passed to the accumulation function. + Currently, there is no supported kwarg. + + Returns + ------- + array + + Raises + ------ + NotImplementedError : subclass does not define accumulations + """ + raise NotImplementedError(f"cannot perform {name} with type {self.dtype}") + def _reduce(self, name: str, *, skipna: bool = True, **kwargs): """ Return a scalar result of performing the reduction operation. 
diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 35b9de3f7af93..8ac665b1b2e11 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -26,6 +26,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import ops +from pandas.core.array_algos import masked_accumulations from pandas.core.arrays.masked import ( BaseMaskedArray, BaseMaskedDtype, @@ -378,3 +379,19 @@ def _logical_method(self, other, op): # i.e. BooleanArray return self._maybe_mask_result(result, mask) + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> BaseMaskedArray: + data = self._data + mask = self._mask + if name in ("cummin", "cummax"): + op = getattr(masked_accumulations, name) + data, mask = op(data, mask, skipna=skipna, **kwargs) + return type(self)(data, mask, copy=False) + else: + from pandas.core.arrays import IntegerArray + + return IntegerArray(data.astype(int), mask)._accumulate( + name, skipna=skipna, **kwargs + ) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f9ff702a608a4..69b7d5a220d24 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1352,6 +1352,27 @@ def _addsub_object_array(self, other: np.ndarray, op): result = result.reshape(self.shape) return result + def _accumulate(self, name: str, *, skipna: bool = True, **kwargs): + + if is_period_dtype(self.dtype): + data = self + else: + # Incompatible types in assignment (expression has type + # "ndarray[Any, Any]", variable has type "DatetimeLikeArrayMixin" + data = self._ndarray.copy() # type: ignore[assignment] + + if name in {"cummin", "cummax"}: + func = np.minimum.accumulate if name == "cummin" else np.maximum.accumulate + result = cast(np.ndarray, nanops.na_accum_func(data, func, skipna=skipna)) + + # error: Unexpected keyword argument "freq" for + # "_simple_new" of "NDArrayBacked" [call-arg] + return type(self)._simple_new( + result, freq=self.freq, dtype=self.dtype # type: ignore[call-arg] + ) + + raise TypeError(f"Accumulation {name} not supported for {type(self)}") + @unpack_zerodim_and_defer("__add__") def __add__(self, other): other_dtype = getattr(other, "dtype", None) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 3aa6a12160b73..3071016bb3bda 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -73,7 +73,10 @@ isin, take, ) -from pandas.core.array_algos import masked_reductions +from pandas.core.array_algos import ( + masked_accumulations, + masked_reductions, +) from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray @@ -1328,3 +1331,14 @@ def all(self, *, skipna: bool = True, **kwargs): return result else: return self.dtype.na_value + + def _accumulate( + self, name: str, *, skipna: bool = True, **kwargs + ) -> BaseMaskedArray: + data = self._data + mask = self._mask + + op = getattr(masked_accumulations, name) + data, mask = op(data, mask, skipna=skipna, **kwargs) + + return type(self)(data, mask, copy=False) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 334e5437c2f70..e68b56c57c176 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -410,6 +410,23 @@ def std( return self._box_func(result) return self._from_backing_data(result) + # ---------------------------------------------------------------- + # Accumulations + + def 
_accumulate(self, name: str, *, skipna: bool = True, **kwargs): + + data = self._ndarray.copy() + + if name in {"cumsum", "cumprod"}: + # TODO: cumprod should not work here GH#48111 + func = np.cumsum if name == "cumsum" else np.cumprod + result = cast(np.ndarray, nanops.na_accum_func(data, func, skipna=skipna)) + + return type(self)._simple_new(result, freq=None, dtype=self.dtype) + + else: + return super()._accumulate(name, skipna=skipna, **kwargs) + # ---------------------------------------------------------------- # Rendering Methods diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c5a931fe29ab1..46330ca22ccdb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10828,7 +10828,11 @@ def _accum_func( def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values - result = nanops.na_accum_func(values, func, skipna=skipna) + result: np.ndarray | ExtensionArray + if isinstance(values, ExtensionArray): + result = values._accumulate(name, skipna=skipna, **kwargs) + else: + result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 571ab3dca1efc..7e765cc5342d1 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -41,6 +41,7 @@ class TestMyDtype(BaseDtypeTests): ``assert_series_equal`` on your base test class. """ +from pandas.tests.extension.base.accumulate import BaseAccumulateTests # noqa from pandas.tests.extension.base.casting import BaseCastingTests # noqa from pandas.tests.extension.base.constructors import BaseConstructorsTests # noqa from pandas.tests.extension.base.dim2 import ( # noqa diff --git a/pandas/tests/extension/base/accumulate.py b/pandas/tests/extension/base/accumulate.py new file mode 100644 index 0000000000000..868172f930844 --- /dev/null +++ b/pandas/tests/extension/base/accumulate.py @@ -0,0 +1,37 @@ +import pytest + +import pandas as pd +from pandas.tests.extension.base.base import BaseExtensionTests + + +class BaseAccumulateTests(BaseExtensionTests): + """ + Accumulation specific tests. Generally these only + make sense for numeric/boolean operations. 
+ """ + + def check_accumulate(self, s, op_name, skipna): + result = getattr(s, op_name)(skipna=skipna) + + if result.dtype == pd.Float32Dtype() and op_name == "cumprod" and skipna: + pytest.skip( + f"Float32 precision lead to large differences with op {op_name} " + f"and skipna={skipna}" + ) + + expected = getattr(s.astype("float64"), op_name)(skipna=skipna) + self.assert_series_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): + op_name = all_numeric_accumulations + ser = pd.Series(data) + + with pytest.raises(NotImplementedError): + getattr(ser, op_name)(skipna=skipna) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna): + op_name = all_numeric_accumulations + ser = pd.Series(data) + self.check_accumulate(ser, op_name, skipna) diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index 9646ade43e1d7..b611701e4e429 100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -16,6 +16,8 @@ import numpy as np import pytest +from pandas.core.dtypes.common import is_bool_dtype + import pandas as pd import pandas._testing as tm from pandas.core.arrays.boolean import BooleanDtype @@ -393,6 +395,19 @@ class TestUnaryOps(base.BaseUnaryOpsTests): pass +class TestAccumulation(base.BaseAccumulateTests): + def check_accumulate(self, s, op_name, skipna): + result = getattr(s, op_name)(skipna=skipna) + expected = getattr(pd.Series(s.astype("float64")), op_name)(skipna=skipna) + tm.assert_series_equal(result, expected, check_dtype=False) + if op_name in ("cummin", "cummax"): + assert is_bool_dtype(result) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): + pass + + class TestParsing(base.BaseParsingTests): pass diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 1e17bf33c806c..9a363c6a0f022 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -156,6 +156,12 @@ class TestReduce(base.BaseNoReduceTests): pass +class TestAccumulate(base.BaseAccumulateTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna): + pass + + class TestMethods(base.BaseMethodsTests): @pytest.mark.xfail(reason="Unobserved categories included") def test_value_counts(self, all_data, dropna): diff --git a/pandas/tests/extension/test_floating.py b/pandas/tests/extension/test_floating.py index 580ab743a9d93..f67f7dc56d26f 100644 --- a/pandas/tests/extension/test_floating.py +++ b/pandas/tests/extension/test_floating.py @@ -217,3 +217,9 @@ class TestParsing(base.BaseParsingTests): @pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning") class Test2DCompat(base.Dim2CompatTests): pass + + +class TestAccumulation(base.BaseAccumulateTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): + pass diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index ba6daf4f2e189..788a0bf46afc5 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -16,6 +16,11 @@ import numpy as np import pytest +from 
pandas.compat import ( + IS64, + is_platform_windows, +) + import pandas as pd import pandas._testing as tm from pandas.api.types import ( @@ -227,6 +232,56 @@ class TestBooleanReduce(base.BaseBooleanReduceTests): pass +class TestAccumulation(base.BaseAccumulateTests): + def check_accumulate(self, s, op_name, skipna): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + length = 64 + if not IS64 or is_platform_windows(): + if not s.dtype.itemsize == 8: + length = 32 + + if s.dtype.name.startswith("U"): + expected_dtype = f"UInt{length}" + else: + expected_dtype = f"Int{length}" + + if op_name == "cumsum": + result = getattr(s, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(s.astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name in ["cummax", "cummin"]: + result = getattr(s, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(s.astype("float64"), op_name)(skipna=skipna), + dtype=s.dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name == "cumprod": + result = getattr(s[:12], op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(s[:12].astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + + else: + raise NotImplementedError(f"{op_name} not supported") + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna): + pass + + class TestPrinting(base.BasePrintingTests): pass diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index f82d3c6c06fca..022e5cb764e14 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -482,3 +482,9 @@ def test_EA_types(self, engine, data): with pytest.raises(NotImplementedError, match=expected_msg): with tm.assert_produces_warning(FutureWarning, match="astype from"): super().test_EA_types(engine, data) + + +class TestNoNumericAccumulations(base.BaseAccumulateTests): + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna): + pass
- [x] closes #28385 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This tries to finish #28509. I've reduced the scope to masked arrays and would prefer to do datetimelike as a follow-up, if we want to change the behavior there. This is currently a bit buggy, as mentioned in https://github.com/pandas-dev/pandas/pull/28509#issuecomment-778741118. Currently, we are dispatching back to the current implementation. I was wondering about the exact interface. Currently, you'd have to call ``_accumulate`` with the name of the function, since ``cumsum`` etc. are not really added to the interface, meaning ``` arr = pd.array([1, 2, pd.NA], dtype="Int64") arr.cumsum() ``` raises, because the method is not registered. Is this intended?
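To make the interface question concrete, a sketch of how the accumulation is reached with this PR applied; `cumsum` is only reachable via `Series`, while the array-level hook must be called by name:

```python
import pandas as pd

arr = pd.array([1, 2, pd.NA], dtype="Int64")

# Dispatch path: Series.cumsum -> block_accum_func -> ExtensionArray._accumulate
print(pd.Series(arr).cumsum())
# 0       1
# 1       3
# 2    <NA>
# dtype: Int64

# The EA-level hook takes the operation name as a string:
print(arr._accumulate("cumsum", skipna=True))
# <IntegerArray>
# [1, 3, <NA>]
# Length: 3, dtype: Int64
```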
https://api.github.com/repos/pandas-dev/pandas/pulls/48111
2022-08-16T15:31:03Z
2022-12-13T01:43:52Z
2022-12-13T01:43:52Z
2022-12-13T22:32:36Z
ENH: dtype-unaware (empty) objects ("any" dtype)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b143e1e50aa6c..3ae503c18397a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2116,6 +2116,8 @@ def _setitem_with_indexer_missing(self, indexer, value): curr_dtype = self.obj.dtype curr_dtype = getattr(curr_dtype, "numpy_dtype", curr_dtype) new_dtype = maybe_promote(curr_dtype, value)[0] + elif self.obj.empty and is_object_dtype(self.obj.dtype): + new_dtype = self.obj.dtype else: new_dtype = None diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index b1eaf43f0b368..bb56cb21a1e3c 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -1939,7 +1939,7 @@ def test_loc_setitem_empty_series(self): # GH#5226 # partially set with an empty object series - ser = Series(dtype=object) + ser = Series() ser.loc[1] = 1 tm.assert_series_equal(ser, Series([1], index=[1])) ser.loc[3] = 3 @@ -1949,7 +1949,7 @@ def test_loc_setitem_empty_series_float(self): # GH#5226 # partially set with an empty object series - ser = Series(dtype=object) + ser = Series() ser.loc[1] = 1.0 tm.assert_series_equal(ser, Series([1.0], index=[1])) ser.loc[3] = 3.0 @@ -1959,7 +1959,7 @@ def test_loc_setitem_empty_series_str_idx(self): # GH#5226 # partially set with an empty object series - ser = Series(dtype=object) + ser = Series() ser.loc["foo"] = 1 tm.assert_series_equal(ser, Series([1], index=["foo"])) ser.loc["bar"] = 3 diff --git a/pandas/tests/series/indexing/test_set_value.py b/pandas/tests/series/indexing/test_set_value.py index cbe1a8bf296c8..3bd7d8bc8ce61 100644 --- a/pandas/tests/series/indexing/test_set_value.py +++ b/pandas/tests/series/indexing/test_set_value.py @@ -15,7 +15,7 @@ def test_series_set_value(): dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)] index = DatetimeIndex(dates) - s = Series(dtype=object) + s = Series() s._set_value(dates[0], 1.0) s._set_value(dates[1], np.nan) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 90051405c6935..47fd8bba78326 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -468,14 +468,14 @@ class TestSetitemWithExpansion: def test_setitem_empty_series(self): # GH#10193 key = Timestamp("2012-01-01") - series = Series(dtype=object) + series = Series() series[key] = 47 expected = Series(47, [key]) tm.assert_series_equal(series, expected) def test_setitem_empty_series_datetimeindex_preserves_freq(self): # GH#33573 our index should retain its freq - series = Series([], DatetimeIndex([], freq="D"), dtype=object) + series = Series([], DatetimeIndex([], freq="D")) key = Timestamp("2012-01-01") series[key] = 47 expected = Series(47, DatetimeIndex([key], freq="D")) @@ -1660,3 +1660,18 @@ def test_setitem_empty_mask_dont_upcast_dt64(): ser.mask(mask, "foo", inplace=True) assert ser.dtype == dti.dtype # no-op -> dont upcast tm.assert_series_equal(ser, orig) + + +def test_setitem_on_series_dtype_object(): + # GH#19647 + result = Series(dtype="object") + result.loc["int"] = 1 + result.loc["float"] = 2.0 + expected = Series(data=[1, 2.0], index=["int", "float"]).astype("object") + tm.assert_series_equal(result, expected) + + result = Series() + result.loc["int"] = 1 + result.loc["float"] = 2.0 + expected = Series(data=[1, 2.0], index=["int", "float"]).astype("float") + tm.assert_series_equal(result, expected)
- [ ] closes #19647 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
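A sketch of the behavior the new `test_setitem_on_series_dtype_object` encodes; this is the PR's proposed behavior for dtype-unaware empty objects, not released pandas:

```python
import pandas as pd

# An explicitly object-dtyped empty Series keeps object dtype on enlargement:
s = pd.Series(dtype="object")
s.loc["int"] = 1
s.loc["float"] = 2.0
print(s.dtype)  # object

# A dtype-unaware empty Series infers a common dtype instead:
s = pd.Series()
s.loc["int"] = 1
s.loc["float"] = 2.0
print(s.dtype)  # float64
```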
https://api.github.com/repos/pandas-dev/pandas/pulls/48110
2022-08-16T13:20:11Z
2022-08-20T19:55:59Z
null
2022-08-20T19:56:06Z
ENH: Support mask in unique
diff --git a/asv_bench/benchmarks/hash_functions.py b/asv_bench/benchmarks/hash_functions.py index d9a291dc27125..da752b902b4fd 100644 --- a/asv_bench/benchmarks/hash_functions.py +++ b/asv_bench/benchmarks/hash_functions.py @@ -39,6 +39,21 @@ def time_unique(self, exponent): pd.unique(self.a2) +class Unique: + params = ["Int64", "Float64"] + param_names = ["dtype"] + + def setup(self, dtype): + self.ser = pd.Series(([1, pd.NA, 2] + list(range(100_000))) * 3, dtype=dtype) + self.ser_unique = pd.Series(list(range(300_000)) + [pd.NA], dtype=dtype) + + def time_unique_with_duplicates(self, exponent): + pd.unique(self.ser) + + def time_unique(self, exponent): + pd.unique(self.ser_unique) + + class NumericSeriesIndexing: params = [ diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 8a2b9c2f77627..3c9a1f86ad2a1 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -521,7 +521,7 @@ cdef class {{name}}HashTable(HashTable): def _unique(self, const {{dtype}}_t[:] values, {{name}}Vector uniques, Py_ssize_t count_prior=0, Py_ssize_t na_sentinel=-1, object na_value=None, bint ignore_na=False, - object mask=None, bint return_inverse=False): + object mask=None, bint return_inverse=False, bint use_result_mask=False): """ Calculate unique values and labels (no sorting!) @@ -551,6 +551,9 @@ cdef class {{name}}HashTable(HashTable): return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. + use_result_mask: bool, default False + Whether to create a result mask for the unique values. Not supported + with return_inverse=True. Returns ------- @@ -558,6 +561,8 @@ cdef class {{name}}HashTable(HashTable): Unique values of input, not sorted labels : ndarray[intp_t] (if return_inverse=True) The labels from values to uniques + result_mask: ndarray[bool], if use_result_mask is true + The mask for the result values. 
""" cdef: Py_ssize_t i, idx, count = count_prior, n = len(values) @@ -566,7 +571,9 @@ cdef class {{name}}HashTable(HashTable): {{c_type}} val, na_value2 khiter_t k {{name}}VectorData *ud - bint use_na_value, use_mask + UInt8Vector result_mask + UInt8VectorData *rmd + bint use_na_value, use_mask, seen_na = False uint8_t[:] mask_values if return_inverse: @@ -574,6 +581,14 @@ cdef class {{name}}HashTable(HashTable): ud = uniques.data use_na_value = na_value is not None use_mask = mask is not None + if not use_mask and use_result_mask: + raise NotImplementedError # pragma: no cover + + if use_result_mask and return_inverse: + raise NotImplementedError # pragma: no cover + + result_mask = UInt8Vector() + rmd = result_mask.data if use_mask: mask_values = mask.view("uint8") @@ -605,6 +620,27 @@ cdef class {{name}}HashTable(HashTable): # and replace the corresponding label with na_sentinel labels[i] = na_sentinel continue + elif not ignore_na and use_result_mask: + if mask_values[i]: + if seen_na: + continue + + seen_na = True + if needs_resize(ud): + with gil: + if uniques.external_view_exists: + raise ValueError("external reference to " + "uniques held, but " + "Vector.resize() needed") + uniques.resize() + if result_mask.external_view_exists: + raise ValueError("external reference to " + "result_mask held, but " + "Vector.resize() needed") + result_mask.resize() + append_data_{{dtype}}(ud, val) + append_data_uint8(rmd, 1) + continue k = kh_get_{{dtype}}(self.table, val) @@ -619,7 +655,16 @@ cdef class {{name}}HashTable(HashTable): "uniques held, but " "Vector.resize() needed") uniques.resize() + if use_result_mask: + if result_mask.external_view_exists: + raise ValueError("external reference to " + "result_mask held, but " + "Vector.resize() needed") + result_mask.resize() append_data_{{dtype}}(ud, val) + if use_result_mask: + append_data_uint8(rmd, 0) + if return_inverse: self.table.vals[k] = count labels[i] = count @@ -632,9 +677,11 @@ cdef class {{name}}HashTable(HashTable): if return_inverse: return uniques.to_array(), labels.base # .base -> underlying ndarray + if use_result_mask: + return uniques.to_array(), result_mask.to_array() return uniques.to_array() - def unique(self, const {{dtype}}_t[:] values, bint return_inverse=False): + def unique(self, const {{dtype}}_t[:] values, bint return_inverse=False, object mask=None): """ Calculate unique values and labels (no sorting!) @@ -645,6 +692,9 @@ cdef class {{name}}HashTable(HashTable): return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. + mask : ndarray[bool], optional + If not None, the mask is used as indicator for missing values + (True = missing, False = valid) instead of `na_value` or Returns ------- @@ -652,10 +702,13 @@ cdef class {{name}}HashTable(HashTable): Unique values of input, not sorted labels : ndarray[intp_t] (if return_inverse) The labels from values to uniques + result_mask: ndarray[bool], if mask is given as input + The mask for the result values. 
""" uniques = {{name}}Vector() + use_result_mask = True if mask is not None else False return self._unique(values, uniques, ignore_na=False, - return_inverse=return_inverse) + return_inverse=return_inverse, mask=mask, use_result_mask=use_result_mask) def factorize(self, const {{dtype}}_t[:] values, Py_ssize_t na_sentinel=-1, object na_value=None, object mask=None): @@ -1013,7 +1066,7 @@ cdef class StringHashTable(HashTable): return uniques.to_array(), labels.base # .base -> underlying ndarray return uniques.to_array() - def unique(self, ndarray[object] values, bint return_inverse=False): + def unique(self, ndarray[object] values, bint return_inverse=False, object mask=None): """ Calculate unique values and labels (no sorting!) @@ -1024,6 +1077,8 @@ cdef class StringHashTable(HashTable): return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. + mask : ndarray[bool], optional + Not yet implemented for StringHashTable Returns ------- @@ -1266,7 +1321,7 @@ cdef class PyObjectHashTable(HashTable): return uniques.to_array(), labels.base # .base -> underlying ndarray return uniques.to_array() - def unique(self, ndarray[object] values, bint return_inverse=False): + def unique(self, ndarray[object] values, bint return_inverse=False, object mask=None): """ Calculate unique values and labels (no sorting!) @@ -1277,6 +1332,8 @@ cdef class PyObjectHashTable(HashTable): return_inverse : bool, default False Whether the mapping of the original array values to their location in the vector of uniques should be returned. + mask : ndarray[bool], optional + Not yet implemented for PyObjectHashTable Returns ------- diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a4736c2a141a5..1a5cda357296e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -404,6 +404,11 @@ def unique(values): >>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ + return unique_with_mask(values) + + +def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None = None): + """See algorithms.unique for docs. Takes a mask for masked arrays.""" values = _ensure_arraylike(values) if is_extension_array_dtype(values.dtype): @@ -414,9 +419,16 @@ def unique(values): htable, values = _get_hashtable_algo(values) table = htable(len(values)) - uniques = table.unique(values) - uniques = _reconstruct_data(uniques, original.dtype, original) - return uniques + if mask is None: + uniques = table.unique(values) + uniques = _reconstruct_data(uniques, original.dtype, original) + return uniques + + else: + uniques, mask = table.unique(values, mask=mask) + uniques = _reconstruct_data(uniques, original.dtype, original) + assert mask is not None # for mypy + return uniques, mask.astype("bool") unique1d = unique diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 128c7e44f5075..15946ab9ce80d 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -851,6 +851,17 @@ def copy(self: BaseMaskedArrayT) -> BaseMaskedArrayT: mask = mask.copy() return type(self)(data, mask, copy=False) + def unique(self: BaseMaskedArrayT) -> BaseMaskedArrayT: + """ + Compute the BaseMaskedArray of unique values. 
+ + Returns + ------- + uniques : BaseMaskedArray + """ + uniques, mask = algos.unique_with_mask(self._data, self._mask) + return type(self)(uniques, mask, copy=False) + @doc(ExtensionArray.searchsorted) def searchsorted( self, diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index def63c552e059..fd617dcb4565e 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -834,6 +834,13 @@ def test_do_not_mangle_na_values(self, unique_nulls_fixture, unique_nulls_fixtur assert a[0] is unique_nulls_fixture assert a[1] is unique_nulls_fixture2 + def test_unique_masked(self, any_numeric_ea_dtype): + # GH#48019 + ser = Series([1, pd.NA, 2] * 3, dtype=any_numeric_ea_dtype) + result = pd.unique(ser) + expected = pd.array([1, pd.NA, 2], dtype=any_numeric_ea_dtype) + tm.assert_extension_array_equal(result, expected) + class TestIsin: def test_invalid(self):
- [x] closes #30037 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. cc @jorisvandenbossche
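A minimal sketch of the user-visible effect, following the test added here (assuming a build with this change):

```python
import pandas as pd

ser = pd.Series([1, pd.NA, 2] * 3, dtype="Int64")

# unique() on a masked array now takes the mask-aware hashtable path
# and preserves both the masked dtype and pd.NA.
print(pd.unique(ser))
# <IntegerArray>
# [1, <NA>, 2]
# Length: 3, dtype: Int64
```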
https://api.github.com/repos/pandas-dev/pandas/pulls/48109
2022-08-16T10:55:18Z
2022-08-18T06:16:39Z
2022-08-18T06:16:39Z
2022-08-18T07:29:10Z
DEV: add gitpod actions
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000000..c7082a1efc0d2 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,61 @@ +name: Build Base Docker Image + +# schedule the base docker build on any change of the environment file in the main branch, or +# at least once monthly, to catch ubuntu security updates +on: + push: + branches: + - main + paths: + - 'environment.yml' + schedule: + - cron: "0 0 1 * *" # monthly + +jobs: + build: + name: Build base Docker image + runs-on: ubuntu-latest + environment: pandas-dev + if: "github.repository_owner == 'pandas-dev' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + steps: + - name: Clone repository + uses: actions/checkout@v3 + - name: Lint Docker + uses: brpaz/hadolint-action@v2 + with: + dockerfile: ./tools/gitpod/Dockerfile + - name: Get refs + shell: bash + run: | + export raw_branch=${GITHUB_REF#refs/heads/} + echo "::set-output name=branch::${raw_branch//\//-}" + echo "::set-output name=date::$(date +'%Y%m%d')" + echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)" + id: getrefs + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: ${{ runner.os }}-buildx- + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + id: docker_build + uses: docker/build-push-action@v3 + with: + context: "." + file: "./tools/gitpod/Dockerfile" + push: ${{ github.event_name != 'pull_request' }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + tags: | + pandas/pandas-dev:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, pandas/pandas-dev:latest + - name: Image digest + # Return details of the image build: sha and shell + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/gitpod.yml b/.github/workflows/gitpod.yml new file mode 100644 index 0000000000000..190a977df7f0b --- /dev/null +++ b/.github/workflows/gitpod.yml @@ -0,0 +1,57 @@ +name: Build Gitpod Docker image + +on: + push: + branches: + - main + +jobs: + build: + name: Build Gitpod Docker image + runs-on: ubuntu-latest + environment: pandas-dev + if: "github.repository_owner == 'pandas-dev' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" + steps: + - name: Clone repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Lint Docker + uses: brpaz/hadolint-action@v2 + with: + dockerfile: ./tools/gitpod/gitpod.Dockerfile + - name: Get refs + shell: bash + run: | + export raw_branch=${GITHUB_REF#refs/heads/} + echo "::set-output name=branch::${raw_branch//\//-}" + echo "::set-output name=date::$(date +'%Y%m%d')" + echo "::set-output name=sha8::$(echo ${GITHUB_SHA} | cut -c1-8)" + id: getrefs + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: ${{ 
runner.os }}-buildx- + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push + id: docker_build + uses: docker/build-push-action@v3 + with: + context: "." + file: "./tools/gitpod/gitpod.Dockerfile" + push: ${{ github.event_name != 'pull_request' }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + tags: | + pandas/pandas-gitpod:${{ steps.getrefs.outputs.date }}-${{ steps.getrefs.outputs.branch}}-${{ steps.getrefs.outputs.sha8 }}, pandas/pandas-gitpod:latest + - name: Image digest + # Return details of the image build: sha and shell + run: echo ${{ steps.docker_build.outputs.digest }}
xref #47790 1. Adding an action to create the base docker file. It runs when the `environment.yml` on main is updated, or at least once monthly, to catch Ubuntu security updates. 2. Adding an action to create the Gitpod docker file. It runs when main is updated. Sidenote: I'm going on a few days' vacation and will be back to test the actions next week, so I made this a draft PR for now. If someone with docker/actions experience wants to leave a quick review, that would be awesome. When I'm back, I will be testing it with my private account until I can access a pandas Docker Hub account. CC @jorisvandenbossche @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/48108
2022-08-16T10:20:11Z
2023-05-15T20:54:34Z
null
2023-05-15T20:54:34Z
DEV: Add gitpod files
diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 0000000000000..6bba39823791e --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1,58 @@ +# Building pandas on init +# Might delegate this later to prebuild with Q2 improvements on gitpod +# https://www.gitpod.io/docs/config-start-tasks/#configuring-the-terminal +# ------------------------------------------------------------------------- + +# assuming we use dockerhub: name of the docker user, docker image, tag, e.g. https://hub.docker.com/r/pandas/pandas-gitpod/tags +image: pythonpandas/pandas-gitpod:latest +tasks: + - name: Prepare development environment + init: | + mkdir -p .vscode + cp gitpod/settings.json .vscode/settings.json + conda activate pandas-dev + git pull --unshallow # need to force this else the prebuild fails + git fetch --tags + python setup.py build_ext -j 4 + python -m pip install -e . --no-build-isolation + echo "🛠 Completed rebuilding Pandas!! 🛠 " + echo "✨ Pre-build complete! You can close this terminal ✨ " + +# -------------------------------------------------------- +# exposing ports for liveserve +ports: + - port: 5500 + onOpen: notify + +# -------------------------------------------------------- +# some useful extensions to have +vscode: + extensions: + - ms-python.python + - yzhang.markdown-all-in-one + - eamodio.gitlens + - lextudio.restructuredtext + # add or remove what you think is generally useful to most contributors + # avoid adding too many. they each open a pop-up window + +# -------------------------------------------------------- +# using prebuilds for the container +# With this configuration the prebuild will happen on push to main +github: + prebuilds: + # enable for main/default branch + main: true + # enable for other branches (defaults to false) + branches: false + # enable for pull requests coming from this repo (defaults to true) + pullRequests: false + # enable for pull requests coming from forks (defaults to false) + pullRequestsFromForks: false + # add a check to pull requests (defaults to true) + addCheck: false + # add a "Review in Gitpod" button as a comment to pull requests (defaults to false) + addComment: false + # add a "Review in Gitpod" button to the pull request's description (defaults to false) + addBadge: false + # add a label once the prebuild is ready to pull requests (defaults to false) + addLabel: false diff --git a/gitpod/Dockerfile b/gitpod/Dockerfile new file mode 100644 index 0000000000000..299267a11fdd1 --- /dev/null +++ b/gitpod/Dockerfile @@ -0,0 +1,100 @@ +# +# Dockerfile for pandas development +# +# Usage: +# ------- +# +# To make a local build of the container, from the 'Docker-dev' directory: +# docker build --rm -f "Dockerfile" -t <build-tag> "." +# +# To use the container use the following command. It assumes that you are in +# the root folder of the pandas git repository, making it available as +# /home/pandas in the container. Whatever changes you make to that directory +# are visible in the host and container. +# The docker image is retrieved from the pandas dockerhub repository +# +# docker run --rm -it -v $(pwd):/home/pandas pandas/pandas-dev:<image-tag> +# +# By default the container will activate the conda environment pandas-dev +# which contains all the dependencies needed for pandas development +# +# To build and install pandas run: +# python setup.py build_ext -j 4 +# python -m pip install -e . 
--no-build-isolation +# +# This image is based on: Ubuntu 20.04 (focal) +# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal +# OS/ARCH: linux/amd64 +FROM gitpod/workspace-base:latest + +ARG MAMBAFORGE_VERSION="22.9.0-1" +ARG CONDA_ENV=pandas-dev +ARG PANDAS_HOME="/home/pandas" + + +# ---- Configure environment ---- +ENV CONDA_DIR=/home/gitpod/mambaforge3 \ + SHELL=/bin/bash +ENV PATH=${CONDA_DIR}/bin:$PATH \ + WORKSPACE=/workspace/pandas + +# ----------------------------------------------------------------------------- +# ---- Creating as root - note: make sure to change to gitpod in the end ---- +USER root + +# Avoid warnings by switching to noninteractive +ENV DEBIAN_FRONTEND=noninteractive + +# Configure apt and install packages +RUN apt-get update \ + && apt-get -y install --no-install-recommends apt-utils dialog 2>&1 \ + # + # Install tzdata and configure timezone (fix for tests which try to read from "/etc/localtime") + && apt-get -y install tzdata \ + && ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime \ + && dpkg-reconfigure -f noninteractive tzdata \ + # + # Verify git, process tools, lsb-release (common in install instructions for CLIs) installed + && apt-get -y install git iproute2 procps iproute2 lsb-release \ + # + # cleanup + && apt-get autoremove -y \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* + +# Switch back to dialog for any ad-hoc use of apt-get +ENV DEBIAN_FRONTEND=dialog + +# Allows this Dockerfile to activate conda environments +SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] + +# ----------------------------------------------------------------------------- +# ---- Installing mamba ---- +RUN wget -q -O mambaforge3.sh \ + "https://github.com/conda-forge/miniforge/releases/download/$MAMBAFORGE_VERSION/Mambaforge-$MAMBAFORGE_VERSION-Linux-x86_64.sh" && \ + bash mambaforge3.sh -p ${CONDA_DIR} -b && \ + rm mambaforge3.sh + +# ----------------------------------------------------------------------------- +# ---- Copy needed files ---- +# basic workspace configurations +COPY ./gitpod/workspace_config /usr/local/bin/workspace_config + +RUN chmod a+rx /usr/local/bin/workspace_config && \ + workspace_config + +# the container to create a conda environment from it +COPY environment.yml /tmp/environment.yml + +RUN mamba env create -f /tmp/environment.yml +# ---- Create conda environment ---- +RUN conda activate $CONDA_ENV && \ + mamba install ccache -y && \ + # needed for docs rendering later on + python -m pip install --no-cache-dir sphinx-autobuild && \ + conda clean --all -f -y && \ + rm -rf /tmp/* + +# ----------------------------------------------------------------------------- +# Always make sure we are not root +USER gitpod diff --git a/gitpod/gitpod.Dockerfile b/gitpod/gitpod.Dockerfile new file mode 100644 index 0000000000000..108aae452aa3d --- /dev/null +++ b/gitpod/gitpod.Dockerfile @@ -0,0 +1,46 @@ +# Doing a local shallow clone - keeps the container secure +# and much slimmer than using COPY directly or making a +# remote clone +ARG BASE_CONTAINER="pythonpandas/pandas-dev:latest" +FROM gitpod/workspace-base:latest as clone + +# the clone should be deep enough for versioneer to work +RUN git clone https://github.com/pandas-dev/pandas --depth 12 /tmp/pandas + +# ----------------------------------------------------------------------------- +# Using the pandas-dev Docker image as a base +# This way, we ensure we have all the needed compilers and dependencies +# while reducing the build time +FROM ${BASE_CONTAINER} as build + +# 
----------------------------------------------------------------------------- +USER root + +# ----------------------------------------------------------------------------- +# ---- ENV variables ---- +# ---- Directories needed ---- +ENV WORKSPACE=/workspace/pandas/ \ + CONDA_ENV=pandas-dev + +# Allows this micromamba.Dockerfile to activate conda environments +SHELL ["/bin/bash", "--login", "-o", "pipefail", "-c"] + +# Copy over the shallow clone +COPY --from=clone --chown=gitpod /tmp/pandas ${WORKSPACE} + +# Everything happens in the /workspace/pandas directory +WORKDIR ${WORKSPACE} + +# Build pandas to populate the cache used by ccache +RUN git config --global --add safe.directory /workspace/pandas +RUN conda activate ${CONDA_ENV} && \ + python setup.py build_ext --inplace && \ + ccache -s + +# Gitpod will load the repository into /workspace/pandas. We remove the +# directory from the image to prevent conflicts +RUN rm -rf ${WORKSPACE} + +# ----------------------------------------------------------------------------- +# Always return to non privileged user +USER gitpod diff --git a/gitpod/settings.json b/gitpod/settings.json new file mode 100644 index 0000000000000..6251c55878541 --- /dev/null +++ b/gitpod/settings.json @@ -0,0 +1,6 @@ +{ + "restructuredtext.updateOnTextChanged": "true", + "restructuredtext.updateDelay": 300, + "restructuredtext.linter.disabledLinters": ["doc8","rst-lint", "rstcheck"], + "python.defaultInterpreterPath": "/home/gitpod/mambaforge3/envs/pandas-dev/bin/python" +} diff --git a/gitpod/workspace_config b/gitpod/workspace_config new file mode 100644 index 0000000000000..d49c93ec83db9 --- /dev/null +++ b/gitpod/workspace_config @@ -0,0 +1,54 @@ +#!/bin/bash +# Basic configurations for the workspace + +set -e + +# gitpod/workspace-base needs at least one file here +touch /home/gitpod/.bashrc.d/empty + +# Add git aliases +git config --global alias.co checkout +git config --global alias.ci commit +git config --global alias.st status +git config --global alias.br branch +git config --global alias.hist "log --pretty=format:'%h %ad | %s%d [%an]' --graph --date=short" +git config --global alias.type 'cat-file -t' +git config --global alias.dump 'cat-file -p' + +# Enable basic vim defaults in ~/.vimrc +echo "filetype plugin indent on" >>~/.vimrc +echo "set colorcolumn=80" >>~/.vimrc +echo "set number" >>~/.vimrc +echo "syntax enable" >>~/.vimrc + +# Vanity custom bash prompt - makes it more legible +echo "PS1='\[\e]0;\u \w\a\]\[\033[01;36m\]\u\[\033[m\] > \[\033[38;5;141m\]\w\[\033[m\] \\$ '" >>~/.bashrc + +# Enable prompt color in the skeleton .bashrc +# hadolint ignore=SC2016 +sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc + +# .gitpod.yml is configured to install pandas from /workspace/pandas +echo "export PYTHONPATH=${WORKSPACE}" >>~/.bashrc + +# make conda activate command available from /bin/bash (login and interactive) +if [[ ! -f "/etc/profile.d/conda.sh" ]]; then + ln -s ${CONDA_DIR}/etc/profile.d/conda.sh /etc/profile.d/conda.sh +fi +echo ". 
${CONDA_DIR}/etc/profile.d/conda.sh" >>~/.bashrc +echo "conda activate pandas-dev" >>~/.bashrc + +# Enable prompt color in the skeleton .bashrc +# hadolint ignore=SC2016 +sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc + +# .gitpod.yml is configured to install pandas from /workspace/pandas +echo "export PYTHONPATH=/workspace/pandas" >>~/.bashrc + +# Set up ccache for compilers for this Dockerfile +# REF: https://github.com/conda-forge/compilers-feedstock/issues/31 +echo "conda activate pandas-dev" >>~/.startuprc +echo "export CC=\"ccache \$CC\"" >>~/.startuprc +echo "export CXX=\"ccache \$CXX\"" >>~/.startuprc +echo "source ~/.startuprc" >>~/.profile +echo "source ~/.startuprc" >>~/.bashrc
xref #47790 1. I split the Docker setup into two Dockerfiles. 2. The base Dockerfile includes the pandas environment, uses a new way to run Mamba in Docker, and builds on my machine (M1 Mac). 3. `gitpod.Dockerfile` builds on top of the base image, which will speed things up for any Gitpod user, and includes a shallow clone of the repository (for more speed). 4. The workspace file defines some nice VS Code features, the caches for C and C++, and the creation of `.profile` and `.bashrc`. Some things in there are sensitive to the order of operations (also in relation to the GitHub action coming in the next PR), so I recommend not rearranging that file 😉 5. I added the Gitpod files in a new directory, currently at the repository root, because there isn't a `tools` directory. Maybe in the future we can discuss renaming `scripts` to `tools` to avoid adding another folder? I don't have a strong opinion on this; I just felt awkward about adding a folder there. Sidenote: I'm going on a few days' vacation and will be back to test the actions next week, so I made this a draft PR for now. If someone with docker/actions experience wants to leave a quick review—awesome. When I'm back, I will be testing it with my private account until I can access a pandas Docker Hub account. CC @jorisvandenbossche @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/48107
2022-08-16T10:17:00Z
2022-12-02T08:41:44Z
2022-12-02T08:41:44Z
2022-12-02T08:41:44Z
REGR: setting numeric value in Categorical Series with enlargement raises internal error
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5340bc6b590c4..3dfc544273a64 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -591,6 +591,12 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): fv = na_value_for_dtype(dtype) return dtype, fv + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories or isna(fill_value): + return dtype, fill_value + else: + return object, ensure_object(fill_value) + elif isna(fill_value): dtype = _dtype_obj if fill_value is None: diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 4c38a2219372d..a7c03c672be58 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -18,6 +18,7 @@ import pandas as pd from pandas import ( Categorical, + CategoricalDtype, CategoricalIndex, DataFrame, DatetimeIndex, @@ -1820,6 +1821,54 @@ def test_loc_getitem_sorted_index_level_with_duplicates(self): result = df.loc[("foo", "bar")] tm.assert_frame_equal(result, expected) + def test_additional_element_to_categorical_series_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = 0 + expected = Series(["a", "b", "c", 0], dtype="object") + tm.assert_series_equal(result, expected) + + def test_additional_categorical_element_loc(self): + # GH#47677 + result = Series(["a", "b", "c"], dtype="category") + result.loc[3] = "a" + expected = Series(["a", "b", "c", "a"], dtype="category") + tm.assert_series_equal(result, expected) + + def test_loc_set_nan_in_categorical_series(self, any_numeric_ea_dtype): + # GH#47677 + srs = Series( + [1, 2, 3], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + # enlarge + srs.loc[3] = np.nan + expected = Series( + [1, 2, 3, np.nan], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + tm.assert_series_equal(srs, expected) + # set into + srs.loc[1] = np.nan + expected = Series( + [1, np.nan, 3, np.nan], + dtype=CategoricalDtype(Index([1, 2, 3], dtype=any_numeric_ea_dtype)), + ) + tm.assert_series_equal(srs, expected) + + @pytest.mark.parametrize("na", (np.nan, pd.NA, None, pd.NaT)) + def test_loc_consistency_series_enlarge_set_into(self, na): + # GH#47677 + srs_enlarge = Series(["a", "b", "c"], dtype="category") + srs_enlarge.loc[3] = na + + srs_setinto = Series(["a", "b", "c", "a"], dtype="category") + srs_setinto.loc[3] = na + + tm.assert_series_equal(srs_enlarge, srs_setinto) + expected = Series(["a", "b", "c", na], dtype="category") + tm.assert_series_equal(srs_enlarge, expected) + def test_loc_getitem_preserves_index_level_category_dtype(self): # GH#15166 df = DataFrame(
- [x] closes #47677 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Somehow I cannot push to the branch of #47751. @CloseChoice Feel free to continue on your branch if you are available; otherwise we can merge this one.
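For reference, a minimal sketch of the behavior exercised by the tests added here (the sketch itself is not part of the PR):

```python
import pandas as pd

# enlarging with a value that is already a category keeps the dtype
ser = pd.Series(["a", "b", "c"], dtype="category")
ser.loc[3] = "a"
print(ser.dtype)  # category

# enlarging with a value outside the categories now falls back to object
# instead of raising an internal error
ser = pd.Series(["a", "b", "c"], dtype="category")
ser.loc[3] = 0
print(ser.dtype)  # object
```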
https://api.github.com/repos/pandas-dev/pandas/pulls/48106
2022-08-16T09:55:33Z
2022-08-16T21:23:46Z
2022-08-16T21:23:46Z
2022-08-16T21:23:50Z
ASV: Add asv for clip with date range
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 1d3bd4a357d24..09c318af76159 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -144,6 +144,16 @@ def time_clip(self, n): self.s.clip(0, 1) +class ClipDt: + def setup(self): + dr = date_range("20220101", periods=100_000, freq="s", tz="UTC") + self.clipper_dt = dr[0:1_000].repeat(100) + self.s = Series(dr) + + def time_clip(self): + self.s.clip(upper=self.clipper_dt) + + class ValueCounts: params = [[10**3, 10**4, 10**5], ["int", "uint", "float", "object"]]
- [x] closes #47955 (Replace xxxx with the Github issue number)
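For context, a small sketch of the operation being benchmarked, with the sizes shrunk from the benchmark's 100,000 rows:

```python
import pandas as pd

dr = pd.date_range("20220101", periods=1_000, freq="s", tz="UTC")
s = pd.Series(dr)

# clip every element against an equally long array of timestamp upper bounds
clipper = dr[0:10].repeat(100)
clipped = s.clip(upper=clipper)
```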
https://api.github.com/repos/pandas-dev/pandas/pulls/48105
2022-08-16T09:25:54Z
2022-08-16T17:40:51Z
2022-08-16T17:40:51Z
2022-08-16T17:41:16Z
DOC: GH48019 add styler to excel
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d9264e8a18f2e..bc736773510fa 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2256,6 +2256,7 @@ def to_excel( ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. + io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes -----
- [x] closes #48019 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
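A short usage sketch of the cross-referenced method; the file name and styling rule are illustrative, and an Excel engine such as `openpyxl` is assumed to be installed:

```python
import pandas as pd

df = pd.DataFrame({"A": [1, -2], "B": [-3, 4]})
styler = df.style.applymap(lambda v: "color: red;" if v < 0 else None)

# unlike DataFrame.to_excel, this writes the CSS styles into the sheet
styler.to_excel("styled.xlsx", engine="openpyxl")
```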
https://api.github.com/repos/pandas-dev/pandas/pulls/48104
2022-08-16T07:18:13Z
2022-08-16T18:26:26Z
2022-08-16T18:26:26Z
2022-08-16T18:26:33Z
DOC: GH48089 Update doc for df to latex
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d9264e8a18f2e..93d831105339d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3351,7 +3351,8 @@ def to_latex( {returns} See Also -------- - Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. + io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX + with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table.
- [x] closes #48089 (Replace xxxx with the Github issue number) - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
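A short sketch of what the cross-reference points to; the styling call is illustrative:

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 5], "y": [3, 2]})

# the conditional formatting is carried into the LaTeX output
latex = df.style.highlight_max(color="red").to_latex(convert_css=True)
print(latex)
```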
https://api.github.com/repos/pandas-dev/pandas/pulls/48102
2022-08-16T00:40:29Z
2022-08-16T18:25:32Z
2022-08-16T18:25:32Z
2022-08-16T18:32:47Z
DOC: Altered capitalization validation script to handle edge cases
diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 166162a4763bf..f6e9dbc74e4b0 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -206,7 +206,7 @@ invoked with the following command D-Tale integrates seamlessly with Jupyter notebooks, Python terminals, Kaggle & Google Colab. Here are some demos of the `grid <http://alphatechadmin.pythonanywhere.com/dtale/main/1>`__. -`hvplot <https://hvplot.holoviz.org/index.html>`__ +`Hvplot <https://hvplot.holoviz.org/index.html>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ hvPlot is a high-level plotting API for the PyData ecosystem built on `HoloViews <https://holoviews.org/>`__. diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index e7233484e16b6..b0a52963d6ab1 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -150,7 +150,12 @@ "LZMA", "Numba", "Timestamp", - "PyArrow", + "STUMPY", + "IDE", + "plotnine", + "D-Tale", + "pandaSDMX", + "PyArrow" } CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS} @@ -184,12 +189,20 @@ def correct_title_capitalization(title: str) -> str: # first word character. correct_title: str = re.sub(r"^\W*", "", title).capitalize() - # Remove a URL from the title. We do this because words in a URL must - # stay lowercase, even if they are a capitalization exception. - removed_https_title = re.sub(r"<https?:\/\/.*[\r\n]*>", "", correct_title) - # Split a title into a list using non-word character delimiters. - word_list = re.split(r"\W", removed_https_title) + word_list = re.split(r"\W", correct_title) + + # Recombine hyphenated words + for word in correct_title.split(): + if '-' in word: + lst = word.split('-') + first = lst[0] + for idx, val in enumerate(word_list): + if val == first: + for _ in range(len(lst)): + del word_list[idx] + word_list.insert(idx, '-'.join(lst)) + break for word in word_list: if word.lower() in CAP_EXCEPTIONS_DICT: @@ -255,10 +268,12 @@ def main(source_paths: list[str]) -> int: for filename in source_paths: for title, line_number in find_titles(filename): - if title != correct_title_capitalization(title): + removed_https_title = re.sub(r"<https?:\/\/.*[\r\n]*>", "", title) + if removed_https_title != correct_title_capitalization(removed_https_title): print( - f"""{filename}:{line_number}:{err_msg} "{title}" to "{ - correct_title_capitalization(title)}" """ + f"""{filename}:{line_number}:{err_msg} "{ + removed_https_title.strip()}" to "{ + correct_title_capitalization(removed_https_title).strip()}" """ ) number_of_errors += 1
references #32550 I fixed the capitalization validation script to stop considering URLs. I also altered the script to treat hyphenated words as a single word. Previously the script asked you to change the capitalization of packages that had "Pandas" in the name, and adding those to the exceptions list would have let the script pass over instances where "Pandas" DID need to be changed to lowercase.
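A quick demonstration of the two behaviors described above; the regex is taken from the script, while the title string is illustrative:

```python
import re

title = "Use pandas with D-Tale <https://github.com/man-group/dtale>"

# URLs are stripped before validation, since words inside them must stay lowercase
no_url = re.sub(r"<https?:\/\/.*[\r\n]*>", "", title)

# hyphenated names such as "D-Tale" are kept together as a single word, so they
# can be matched against the capitalization exceptions list as a whole
print(no_url.strip().split())  # ['Use', 'pandas', 'with', 'D-Tale']
```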
https://api.github.com/repos/pandas-dev/pandas/pulls/48100
2022-08-15T22:53:32Z
2022-10-04T18:29:08Z
null
2022-10-04T18:29:08Z
TYP: reshape
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6cfca4ebdc612..0db4cdf9f5c26 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8942,7 +8942,7 @@ def explode( if is_scalar(column) or isinstance(column, tuple): columns = [column] elif isinstance(column, list) and all( - map(lambda c: is_scalar(c) or isinstance(c, tuple), column) + is_scalar(c) or isinstance(c, tuple) for c in column ): if not column: raise ValueError("column must be nonempty") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d9264e8a18f2e..aeaa4a432ad4a 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1613,7 +1613,7 @@ def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: # have consistent precedence and validation logic throughout the library. @final - def _is_level_reference(self, key, axis=0): + def _is_level_reference(self, key: Level, axis=0) -> bool_t: """ Test whether a key is a level reference for a given axis. @@ -1625,7 +1625,7 @@ def _is_level_reference(self, key, axis=0): Parameters ---------- - key : str + key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) @@ -1644,7 +1644,7 @@ def _is_level_reference(self, key, axis=0): ) @final - def _is_label_reference(self, key, axis=0) -> bool_t: + def _is_label_reference(self, key: Level, axis=0) -> bool_t: """ Test whether a key is a label reference for a given axis. @@ -1654,8 +1654,8 @@ def _is_label_reference(self, key, axis=0) -> bool_t: Parameters ---------- - key : str - Potential label name + key : Hashable + Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) @@ -1674,7 +1674,7 @@ def _is_label_reference(self, key, axis=0) -> bool_t: ) @final - def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: + def _is_label_or_level_reference(self, key: Level, axis: int = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. @@ -1685,7 +1685,7 @@ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: Parameters ---------- - key : str + key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) @@ -1699,7 +1699,7 @@ def _is_label_or_level_reference(self, key: str, axis: int = 0) -> bool_t: ) @final - def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: + def _check_label_or_level_ambiguity(self, key: Level, axis: int = 0) -> None: """ Check whether `key` is ambiguous. @@ -1708,7 +1708,7 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: Parameters ---------- - key : str or object + key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). 
@@ -1717,6 +1717,7 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: ------ ValueError: `key` is ambiguous """ + axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) @@ -1743,7 +1744,7 @@ def _check_label_or_level_ambiguity(self, key, axis: int = 0) -> None: raise ValueError(msg) @final - def _get_label_or_level_values(self, key: Level, axis: int = 0) -> np.ndarray: + def _get_label_or_level_values(self, key: Level, axis: int = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. @@ -1758,14 +1759,14 @@ def _get_label_or_level_values(self, key: Level, axis: int = 0) -> np.ndarray: Parameters ---------- - key : str + key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- - values : np.ndarray + np.ndarray or ExtensionArray Raises ------ diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 23784ed3bb87e..3e3ff68bb8719 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -11,6 +11,7 @@ from typing import ( TYPE_CHECKING, Hashable, + Sequence, cast, ) import uuid @@ -25,6 +26,7 @@ lib, ) from pandas._typing import ( + AnyArrayLike, ArrayLike, DtypeObj, IndexLabel, @@ -609,6 +611,21 @@ class _MergeOperation: """ _merge_type = "merge" + how: str + on: IndexLabel | None + # left_on/right_on may be None when passed, but in validate_specification + # get replaced with non-None. + left_on: Sequence[Hashable | AnyArrayLike] + right_on: Sequence[Hashable | AnyArrayLike] + left_index: bool + right_index: bool + axis: int + bm_axis: int + sort: bool + suffixes: Suffixes + copy: bool + indicator: bool + validate: str | None def __init__( self, @@ -819,8 +836,16 @@ def _maybe_restore_index_levels(self, result: DataFrame) -> None: self.join_names, self.left_on, self.right_on ): if ( - self.orig_left._is_level_reference(left_key) - and self.orig_right._is_level_reference(right_key) + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + self.orig_left._is_level_reference(left_key) # type: ignore[arg-type] + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + and self.orig_right._is_level_reference( + right_key # type: ignore[arg-type] + ) and left_key == right_key and name not in result.index.names ): @@ -1049,13 +1074,13 @@ def _get_merge_keys(self): Returns ------- - left_keys, right_keys + left_keys, right_keys, join_names """ - left_keys = [] - right_keys = [] - # error: Need type annotation for 'join_names' (hint: "join_names: List[<type>] - # = ...") - join_names = [] # type: ignore[var-annotated] + # left_keys, right_keys entries can actually be anything listlike + # with a 'dtype' attr + left_keys: list[AnyArrayLike] = [] + right_keys: list[AnyArrayLike] = [] + join_names: list[Hashable] = [] right_drop = [] left_drop = [] @@ -1078,11 +1103,16 @@ def _get_merge_keys(self): if _any(self.left_on) and _any(self.right_on): for lk, rk in zip(self.left_on, self.right_on): if is_lkey(lk): + lk = cast(AnyArrayLike, lk) left_keys.append(lk) if is_rkey(rk): + rk = cast(AnyArrayLike, rk) right_keys.append(rk) join_names.append(None) # what to do? 
else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) join_names.append(rk) @@ -1092,6 +1122,9 @@ def _get_merge_keys(self): join_names.append(right.index.name) else: if not is_rkey(rk): + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) else: @@ -1104,8 +1137,12 @@ def _get_merge_keys(self): else: left_drop.append(lk) else: + rk = cast(AnyArrayLike, rk) right_keys.append(rk) if lk is not None: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + lk = cast(Hashable, lk) left_keys.append(left._get_label_or_level_values(lk)) join_names.append(lk) else: @@ -1115,9 +1152,13 @@ def _get_merge_keys(self): elif _any(self.left_on): for k in self.left_on: if is_lkey(k): + k = cast(AnyArrayLike, k) left_keys.append(k) join_names.append(None) else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) left_keys.append(left._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.right.index, MultiIndex): @@ -1132,9 +1173,13 @@ def _get_merge_keys(self): elif _any(self.right_on): for k in self.right_on: if is_rkey(k): + k = cast(AnyArrayLike, k) right_keys.append(k) join_names.append(None) else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) right_keys.append(right._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.left.index, MultiIndex): diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 5039a29b74f1b..52b059f6b92af 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1,7 +1,10 @@ from __future__ import annotations import itertools -from typing import TYPE_CHECKING +from typing import ( + TYPE_CHECKING, + cast, +) import warnings import numpy as np @@ -452,7 +455,7 @@ def _unstack_multiple(data, clocs, fill_value=None): return unstacked -def unstack(obj, level, fill_value=None): +def unstack(obj: Series | DataFrame, level, fill_value=None): if isinstance(level, (tuple, list)): if len(level) != 1: @@ -489,19 +492,20 @@ def unstack(obj, level, fill_value=None): ) -def _unstack_frame(obj, level, fill_value=None): +def _unstack_frame(obj: DataFrame, level, fill_value=None): + assert isinstance(obj.index, MultiIndex) # checked by caller + unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor) + if not obj._can_fast_transpose: - unstacker = _Unstacker(obj.index, level=level) mgr = obj._mgr.unstack(unstacker, fill_value=fill_value) return obj._constructor(mgr) else: - unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor) return unstacker.get_result( obj._values, value_columns=obj.columns, fill_value=fill_value ) -def _unstack_extension_series(series, level, fill_value): +def _unstack_extension_series(series: Series, level, fill_value) -> DataFrame: """ Unstack an ExtensionArray-backed Series. @@ -534,14 +538,14 @@ def _unstack_extension_series(series, level, fill_value): return result -def stack(frame, level=-1, dropna=True): +def stack(frame: DataFrame, level=-1, dropna: bool = True): """ Convert DataFrame to Series with multi-level Index. 
Columns become the second level of the resulting hierarchical index Returns ------- - stacked : Series + stacked : Series or DataFrame """ def factorize(index): @@ -676,8 +680,10 @@ def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex: ) -def _stack_multi_columns(frame, level_num=-1, dropna=True): - def _convert_level_number(level_num: int, columns): +def _stack_multi_columns( + frame: DataFrame, level_num: int = -1, dropna: bool = True +) -> DataFrame: + def _convert_level_number(level_num: int, columns: Index): """ Logic for converting the level number to something we can safely pass to swaplevel. @@ -690,32 +696,36 @@ def _convert_level_number(level_num: int, columns): return level_num - this = frame.copy() + this = frame.copy(deep=False) + mi_cols = this.columns # cast(MultiIndex, this.columns) + assert isinstance(mi_cols, MultiIndex) # caller is responsible # this makes life much simpler - if level_num != frame.columns.nlevels - 1: + if level_num != mi_cols.nlevels - 1: # roll levels to put selected level at end - roll_columns = this.columns - for i in range(level_num, frame.columns.nlevels - 1): + roll_columns = mi_cols + for i in range(level_num, mi_cols.nlevels - 1): # Need to check if the ints conflict with level names lev1 = _convert_level_number(i, roll_columns) lev2 = _convert_level_number(i + 1, roll_columns) roll_columns = roll_columns.swaplevel(lev1, lev2) - this.columns = roll_columns + this.columns = mi_cols = roll_columns - if not this.columns._is_lexsorted(): + if not mi_cols._is_lexsorted(): # Workaround the edge case where 0 is one of the column names, # which interferes with trying to sort based on the first # level - level_to_sort = _convert_level_number(0, this.columns) + level_to_sort = _convert_level_number(0, mi_cols) this = this.sort_index(level=level_to_sort, axis=1) + mi_cols = this.columns - new_columns = _stack_multi_column_index(this.columns) + mi_cols = cast(MultiIndex, mi_cols) + new_columns = _stack_multi_column_index(mi_cols) # time to ravel the values new_data = {} - level_vals = this.columns.levels[-1] - level_codes = sorted(set(this.columns.codes[-1])) + level_vals = mi_cols.levels[-1] + level_codes = sorted(set(mi_cols.codes[-1])) level_vals_nan = level_vals.insert(len(level_vals), None) level_vals_used = np.take(level_vals_nan, level_codes)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/48099
2022-08-15T22:30:25Z
2022-08-16T18:29:33Z
2022-08-16T18:29:33Z
2022-08-16T19:15:57Z
Initial Commit
diff --git a/README.md b/README.md index aaf63ead9c416..1550cc8a1f151 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ </div> ----------------- - +# ABCD # pandas: powerful Python data analysis toolkit [![PyPI Latest Release](https://img.shields.io/pypi/v/pandas.svg)](https://pypi.org/project/pandas/) [![Conda Latest Release](https://anaconda.org/conda-forge/pandas/badges/version.svg)](https://anaconda.org/anaconda/pandas/)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/48096
2022-08-15T20:35:42Z
2022-08-15T20:58:06Z
null
2022-08-15T20:58:06Z
CI/TST: Fix test_resample_empty_dataframe for 32 bit build
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 403eeb8db73f9..04f147ee40e62 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -169,7 +169,7 @@ def test_resample_empty_dataframe(empty_frame_dti, freq, resample_method): expected = df.copy() else: # GH14962 - expected = Series([], dtype=int) + expected = Series([], dtype=np.int64) expected.index = _asfreq_compat(df.index, freq)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). xref https://github.com/pandas-dev/pandas/pull/48072
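For context, a sketch of why pinning the integer width matters; `int` maps to the platform's default C integer:

```python
import numpy as np
import pandas as pd

# int32 on 32-bit builds (and on Windows), int64 on most 64-bit Unix builds
print(np.dtype(int))

# pinning the width makes the expected value platform-independent
expected = pd.Series([], dtype=np.int64)
```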
https://api.github.com/repos/pandas-dev/pandas/pulls/48095
2022-08-15T20:13:49Z
2022-08-15T23:09:09Z
2022-08-15T23:09:09Z
2022-08-16T10:06:02Z
BUG: df.__setitem__ casting EA to object when setting with scalar key and frame value
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a1a2149da7cf6..94167299efa90 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -968,6 +968,7 @@ Indexing - Bug in :meth:`Index.reindex` raising ``AssertionError`` when ``level`` was specified but no :class:`MultiIndex` was given; level is ignored now (:issue:`35132`) - Bug when setting a value too large for a :class:`Series` dtype failing to coerce to a common type (:issue:`26049`, :issue:`32878`) - Bug in :meth:`loc.__setitem__` treating ``range`` keys as positional instead of label-based (:issue:`45479`) +- Bug in :meth:`DataFrame.__setitem__` casting extension array dtypes to object when setting with a scalar key and :class:`DataFrame` as value (:issue:`46896`) - Bug in :meth:`Series.__setitem__` when setting ``boolean`` dtype values containing ``NA`` incorrectly raising instead of casting to ``boolean`` dtype (:issue:`45462`) - Bug in :meth:`Series.loc` raising with boolean indexer containing ``NA`` when :class:`Index` did not match (:issue:`46551`) - Bug in :meth:`Series.__setitem__` where setting :attr:`NA` into a numeric-dtype :class:`Series` would incorrectly upcast to object-dtype rather than treating the value as ``np.nan`` (:issue:`44199`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6cfca4ebdc612..2daf93bbebe48 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4064,13 +4064,30 @@ def _set_item_frame_value(self, key, value: DataFrame) -> None: if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): - cols = maybe_droplevels(cols, key) - if len(cols) and not cols.equals(value.columns): - value = value.reindex(cols, axis=1) + cols_droplevel = maybe_droplevels(cols, key) + if len(cols_droplevel) and not cols_droplevel.equals(value.columns): + value = value.reindex(cols_droplevel, axis=1) - # now align rows - arraylike = _reindex_for_setitem(value, self.index) - self._set_item_mgr(key, arraylike) + for col, col_droplevel in zip(cols, cols_droplevel): + self[col] = value[col_droplevel] + return + + if is_scalar(cols): + self[cols] = value[value.columns[0]] + return + + # now align rows + arraylike = _reindex_for_setitem(value, self.index) + self._set_item_mgr(key, arraylike) + return + + if len(value.columns) != 1: + raise ValueError( + "Cannot set a DataFrame with multiple columns to the single " + f"column {key}" + ) + + self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False diff --git a/pandas/tests/frame/indexing/test_coercion.py b/pandas/tests/frame/indexing/test_coercion.py index cf4af32fc887a..c970558162707 100644 --- a/pandas/tests/frame/indexing/test_coercion.py +++ b/pandas/tests/frame/indexing/test_coercion.py @@ -164,7 +164,6 @@ def test_12499(): tm.assert_frame_equal(df, expected) [email protected](reason="Too many columns cast to float64") def test_20476(): mi = MultiIndex.from_product([["A", "B"], ["a", "b", "c"]]) df = DataFrame(-1, index=range(3), columns=mi) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index cd547819dbe94..6b19738becc8e 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -91,8 +91,8 @@ def test_setitem_error_msmgs(self): # GH 4107, more descriptive error message df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"]) - msg = "incompatible index of inserted 
column with frame index" - with pytest.raises(TypeError, match=msg): + msg = "Cannot set a DataFrame with multiple columns to the single column gr" + with pytest.raises(ValueError, match=msg): df["gr"] = df.groupby(["b", "c"]).count() def test_setitem_benchmark(self): @@ -741,6 +741,18 @@ def test_setitem_rhs_dataframe(self): df.isetitem(0, DataFrame({"a": [10, 11]}, index=[1, 2])) tm.assert_frame_equal(df, expected) + def test_setitem_frame_overwrite_with_ea_dtype(self, any_numeric_ea_dtype): + # GH#46896 + df = DataFrame(columns=["a", "b"], data=[[1, 2], [3, 4]]) + df["a"] = DataFrame({"a": [10, 11]}, dtype=any_numeric_ea_dtype) + expected = DataFrame( + { + "a": Series([10, 11], dtype=any_numeric_ea_dtype), + "b": [2, 4], + } + ) + tm.assert_frame_equal(df, expected) + class TestSetitemTZAwareValues: @pytest.fixture @@ -903,6 +915,19 @@ def test_frame_setitem_rangeindex_into_new_col(self): expected = DataFrame({"a": ["b"], "b": [100]}, index=[1]) tm.assert_frame_equal(result, expected) + def test_setitem_frame_keep_ea_dtype(self, any_numeric_ea_dtype): + # GH#46896 + df = DataFrame(columns=["a", "b"], data=[[1, 2], [3, 4]]) + df["c"] = DataFrame({"a": [10, 11]}, dtype=any_numeric_ea_dtype) + expected = DataFrame( + { + "a": [1, 3], + "b": [2, 4], + "c": Series([10, 11], dtype=any_numeric_ea_dtype), + } + ) + tm.assert_frame_equal(df, expected) + class TestDataFrameSetItemSlicing: def test_setitem_slice_position(self):
- [x] closes #46896 (Replace xxxx with the Github issue number) - [x] closes #20476 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Setting with duplicate columns is still buggy, but I could not figure out an efficient fix yet; I will open a follow-up when this is merged.
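A minimal sketch of the behavior after this change, following the tests added here:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 3], "b": [2, 4]})

# the extension dtype of a single-column value is now preserved
df["c"] = pd.DataFrame({"a": [10, 11]}, dtype="Int64")
print(df["c"].dtype)  # Int64, previously cast to object

# assigning a multi-column frame to a single column now raises a clear error
try:
    df["d"] = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
except ValueError as err:
    print(err)  # Cannot set a DataFrame with multiple columns to the single column d
```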
https://api.github.com/repos/pandas-dev/pandas/pulls/48094
2022-08-15T20:01:14Z
2022-08-17T00:58:24Z
2022-08-17T00:58:24Z
2022-08-17T07:39:35Z
TST: Filter/test pyarrow PerformanceWarnings
diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index 55a6cc48ebfc8..762fc060c0ffd 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -4,6 +4,9 @@ import numpy as np import pytest +from pandas.compat import pa_version_under7p0 +from pandas.errors import PerformanceWarning + import pandas as pd from pandas import ( DatetimeIndex, @@ -36,8 +39,16 @@ def test_value_counts(index_or_series_obj): # TODO(GH#32514): Order of entries with the same count is inconsistent # on CI (gh-32449) if obj.duplicated().any(): - result = result.sort_index() - expected = expected.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + result = result.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + expected = expected.sort_index() tm.assert_series_equal(result, expected) @@ -70,8 +81,16 @@ def test_value_counts_null(null_obj, index_or_series_obj): if obj.duplicated().any(): # TODO(GH#32514): # Order of entries with the same count is inconsistent on CI (gh-32449) - expected = expected.sort_index() - result = result.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + expected = expected.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + result = result.sort_index() if not isinstance(result.dtype, np.dtype): # i.e IntegerDtype @@ -84,8 +103,16 @@ def test_value_counts_null(null_obj, index_or_series_obj): if obj.duplicated().any(): # TODO(GH#32514): # Order of entries with the same count is inconsistent on CI (gh-32449) - expected = expected.sort_index() - result = result.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + expected = expected.sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + result = result.sort_index() tm.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_string.py b/pandas/tests/extension/test_string.py index e4293d6d70e38..fb7c0b32ff16d 100644 --- a/pandas/tests/extension/test_string.py +++ b/pandas/tests/extension/test_string.py @@ -18,7 +18,10 @@ import numpy as np import pytest -from pandas.compat import pa_version_under6p0 +from pandas.compat import ( + pa_version_under6p0, + pa_version_under7p0, +) from pandas.errors import PerformanceWarning import pandas as pd @@ -167,6 +170,22 @@ def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna): class TestMethods(base.BaseMethodsTests): + def test_argsort(self, data_for_sorting): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_argsort(data_for_sorting) + + def test_argsort_missing(self, data_missing_for_sorting): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_missing_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_argsort_missing(data_missing_for_sorting) + def test_argmin_argmax( self, data_for_sorting, data_missing_for_sorting, na_value, request 
): @@ -210,6 +229,89 @@ def test_argreduce_series( data_missing_for_sorting, op_name, skipna, expected ) + @pytest.mark.parametrize("dropna", [True, False]) + def test_value_counts(self, all_data, dropna, request): + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(all_data.dtype, "storage", "") == "pyarrow" + and not (dropna and "data_missing" in request.node.nodeid), + ): + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(other.dtype, "storage", "") == "pyarrow" + and not (dropna and "data_missing" in request.node.nodeid), + ): + expected = pd.Series(other).value_counts(dropna=dropna).sort_index() + + self.assert_series_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning") + def test_value_counts_with_normalize(self, data): + super().test_value_counts_with_normalize(data) + + def test_argsort_missing_array(self, data_missing_for_sorting): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_missing_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_argsort_missing(data_missing_for_sorting) + + @pytest.mark.parametrize( + "na_position, expected", + [ + ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))), + ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))), + ], + ) + def test_nargsort(self, data_missing_for_sorting, na_position, expected): + # GH 25439 + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_missing_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_nargsort(data_missing_for_sorting, na_position, expected) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values(self, data_for_sorting, ascending, sort_by_key): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_sort_values(data_for_sorting, ascending, sort_by_key) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_missing( + self, data_missing_for_sorting, ascending, sort_by_key + ): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_missing_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_sort_values_missing( + data_missing_for_sorting, ascending, sort_by_key + ) + + @pytest.mark.parametrize("ascending", [True, False]) + def test_sort_values_frame(self, data_for_sorting, ascending): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_for_sorting.dtype, "storage", "") == "pyarrow", + ): + super().test_sort_values_frame(data_for_sorting, ascending) + class TestCasting(base.BaseCastingTests): pass @@ -236,8 +338,41 @@ class TestPrinting(base.BasePrintingTests): class TestGroupBy(base.BaseGroupbyTests): - def test_groupby_extension_transform(self, data_for_grouping, request): - super().test_groupby_extension_transform(data_for_grouping) + @pytest.mark.parametrize("as_index", [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and 
getattr(data_for_grouping.dtype, "storage", "") == "pyarrow", + ): + result = df.groupby("B", as_index=as_index).A.mean() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_for_grouping.dtype, "storage", "") == "pyarrow", + ): + _, uniques = pd.factorize(data_for_grouping, sort=True) + + if as_index: + index = pd.Index._with_infer(uniques, name="B") + expected = pd.Series([3.0, 1.0, 4.0], index=index, name="A") + self.assert_series_equal(result, expected) + else: + expected = pd.DataFrame({"B": uniques, "A": [3.0, 1.0, 4.0]}) + self.assert_frame_equal(result, expected) + + def test_groupby_extension_transform(self, data_for_grouping): + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 + and getattr(data_for_grouping.dtype, "storage", "") == "pyarrow", + ): + super().test_groupby_extension_transform(data_for_grouping) + + @pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning") + def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): + super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op) class Test2DCompat(base.Dim2CompatTests): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index def63c552e059..eb2b8be2d716b 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -9,6 +9,8 @@ algos as libalgos, hashtable as ht, ) +from pandas.compat import pa_version_under7p0 +from pandas.errors import PerformanceWarning import pandas.util._test_decorators as td from pandas.core.dtypes.common import ( @@ -50,7 +52,13 @@ class TestFactorize: @pytest.mark.parametrize("sort", [True, False]) def test_factorize(self, index_or_series_obj, sort): obj = index_or_series_obj - result_codes, result_uniques = obj.factorize(sort=sort) + with tm.maybe_produces_warning( + PerformanceWarning, + sort + and pa_version_under7p0 + and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + result_codes, result_uniques = obj.factorize(sort=sort) constructor = Index if isinstance(obj, MultiIndex): @@ -64,7 +72,11 @@ def test_factorize(self, index_or_series_obj, sort): expected_uniques = expected_uniques.astype(object) if sort: - expected_uniques = expected_uniques.sort_values() + with tm.maybe_produces_warning( + PerformanceWarning, + pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow", + ): + expected_uniques = expected_uniques.sort_values() # construct an integer ndarray so that # `expected_uniques.take(expected_codes)` is equal to `obj`
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). * Added `tm.maybe_produces_warning` to some calls that could produce a `PerformanceWarning` depending on the pyarrow version. * If a test has multiple lines that could produce the warning and another test already checks the `PerformanceWarning` for a similar call, just added a `pytest.mark.filterwarnings` to reduce code duplication.
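The pattern used throughout the diff, sketched in isolation; `tm.maybe_produces_warning` asserts the warning only when the second argument is true, and asserts silence otherwise:

```python
import pandas._testing as tm
from pandas.compat import pa_version_under7p0
from pandas.errors import PerformanceWarning


def sort_with_optional_warning(obj):
    # older pyarrow falls back to a non-native code path and warns; with
    # newer versions the same call must stay silent
    with tm.maybe_produces_warning(
        PerformanceWarning,
        pa_version_under7p0 and getattr(obj.dtype, "storage", "") == "pyarrow",
    ):
        return obj.sort_index()
```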
https://api.github.com/repos/pandas-dev/pandas/pulls/48093
2022-08-15T19:13:56Z
2022-08-19T21:17:14Z
2022-08-19T21:17:14Z
2022-08-19T21:41:12Z
ENH: move an exception and add a prehook to check for exception place…
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cef3d6aea5d27..da0162ce7e160 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -236,6 +236,14 @@ repos: entry: python scripts/validate_min_versions_in_sync.py language: python files: ^(ci/deps/actions-.*-minimum_versions\.yaml|pandas/compat/_optional\.py)$ + - id: validate-errors-locations + name: Validate errors locations + description: Validate errors are in appropriate locations. + entry: python scripts/validate_exception_location.py + language: python + files: ^pandas/ + exclude: ^(pandas/_libs/|pandas/tests/|pandas/errors/__init__.py$|pandas/_version.py) + types: [python] - id: flake8-pyi name: flake8-pyi entry: flake8 --extend-ignore=E301,E302,E305,E701,E704 diff --git a/doc/source/reference/testing.rst b/doc/source/reference/testing.rst index 1144c767942d4..07624e87d82e0 100644 --- a/doc/source/reference/testing.rst +++ b/doc/source/reference/testing.rst @@ -38,9 +38,13 @@ Exceptions and warnings errors.IncompatibilityWarning errors.IndexingError errors.InvalidColumnName + errors.InvalidComparison errors.InvalidIndexError + errors.InvalidVersion errors.IntCastingNaNError + errors.LossySetitemError errors.MergeError + errors.NoBufferPresent errors.NullFrequencyError errors.NumbaUtilError errors.NumExprClobberingError diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 413597f6c3748..02925afc63918 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -34,6 +34,7 @@ Other enhancements - :func:`assert_frame_equal` now shows the first element where the DataFrames differ, analogously to ``pytest``'s output (:issue:`47910`) - Added ``index`` parameter to :meth:`DataFrame.to_dict` (:issue:`46398`) - Added metadata propagation for binary operators on :class:`DataFrame` (:issue:`28283`) +- :class:`.CategoricalConversionWarning`, :class:`.InvalidComparison`, :class:`.InvalidVersion`, :class:`.LossySetitemError`, and :class:`.NoBufferPresent` are now exposed in ``pandas.errors`` (:issue:`27656`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 4f92afd048c2e..707db65533540 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -65,6 +65,7 @@ from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, + InvalidComparison, NullFrequencyError, PerformanceWarning, ) @@ -153,15 +154,6 @@ DatetimeLikeArrayT = TypeVar("DatetimeLikeArrayT", bound="DatetimeLikeArrayMixin") -class InvalidComparison(Exception): - """ - Raised by _validate_comparison_value to indicate to caller it should - return invalid_comparison.
- """ - - pass - - class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5809acbd55380..75a0db3233130 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -40,7 +40,10 @@ DtypeObj, Scalar, ) -from pandas.errors import IntCastingNaNError +from pandas.errors import ( + IntCastingNaNError, + LossySetitemError, +) from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg @@ -2103,11 +2106,3 @@ def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: if not len(rng): return True return np.can_cast(rng[0], dtype) and np.can_cast(rng[-1], dtype) - - -class LossySetitemError(Exception): - """ - Raised when trying to do a __setitem__ on an np.ndarray that is not lossless. - """ - - pass diff --git a/pandas/core/interchange/column.py b/pandas/core/interchange/column.py index dc24c928d1f39..f375048563c70 100644 --- a/pandas/core/interchange/column.py +++ b/pandas/core/interchange/column.py @@ -6,6 +6,7 @@ from pandas._libs.lib import infer_dtype from pandas._libs.tslibs import iNaT +from pandas.errors import NoBufferPresent from pandas.util._decorators import cache_readonly import pandas as pd @@ -23,7 +24,6 @@ from pandas.core.interchange.utils import ( ArrowCTypes, Endianness, - NoBufferPresent, dtype_to_arrow_c_fmt, ) diff --git a/pandas/core/interchange/utils.py b/pandas/core/interchange/utils.py index 1d56af94b2629..aa717d05aecb5 100644 --- a/pandas/core/interchange/utils.py +++ b/pandas/core/interchange/utils.py @@ -89,7 +89,3 @@ def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: raise NotImplementedError( f"Conversion of {dtype} to Arrow C format string is not implemented." ) - - -class NoBufferPresent(Exception): - """Exception to signal that there is no requested buffer.""" diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index d0c9ef94f4453..3e4f116953cb3 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -12,6 +12,8 @@ OutOfBoundsTimedelta, ) +from pandas.util.version import InvalidVersion + class IntCastingNaNError(ValueError): """ @@ -535,6 +537,24 @@ class CategoricalConversionWarning(Warning): """ +class LossySetitemError(Exception): + """ + Raised when trying to do a __setitem__ on an np.ndarray that is not lossless. + """ + + +class NoBufferPresent(Exception): + """ + Exception is raised in _get_data_buffer to signal that there is no requested buffer. + """ + + +class InvalidComparison(Exception): + """ + Exception is raised by _validate_comparison_value to indicate an invalid comparison. 
+ """ + + __all__ = [ "AbstractMethodError", "AccessorRegistrationWarning", @@ -550,9 +570,13 @@ class CategoricalConversionWarning(Warning): "IncompatibilityWarning", "IntCastingNaNError", "InvalidColumnName", + "InvalidComparison", "InvalidIndexError", + "InvalidVersion", "IndexingError", + "LossySetitemError", "MergeError", + "NoBufferPresent", "NullFrequencyError", "NumbaUtilError", "NumExprClobberingError", diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index c6ca51b7763d9..5dffee587adcb 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -11,33 +11,37 @@ @pytest.mark.parametrize( "exc", [ - "UnsupportedFunctionCall", - "UnsortedIndexError", - "OutOfBoundsDatetime", - "ParserError", - "PerformanceWarning", + "AttributeConflictWarning", + "CSSWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "DataError", + "DatabaseError", "DtypeWarning", "EmptyDataError", - "ParserWarning", + "IncompatibilityWarning", + "IndexingError", + "InvalidColumnName", + "InvalidComparison", + "InvalidVersion", + "LossySetitemError", "MergeError", - "OptionError", - "NumbaUtilError", - "DataError", - "SpecificationError", - "SettingWithCopyError", - "SettingWithCopyWarning", + "NoBufferPresent", "NumExprClobberingError", - "IndexingError", - "PyperclipException", - "CSSWarning", - "ClosedFileError", + "NumbaUtilError", + "OptionError", + "OutOfBoundsDatetime", + "ParserError", + "ParserWarning", + "PerformanceWarning", "PossibleDataLossError", - "IncompatibilityWarning", - "AttributeConflictWarning", - "DatabaseError", "PossiblePrecisionLoss", - "CategoricalConversionWarning", - "InvalidColumnName", + "PyperclipException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UnsortedIndexError", + "UnsupportedFunctionCall", "ValueLabelTypeMismatch", ], ) diff --git a/scripts/pandas_errors_documented.py b/scripts/pandas_errors_documented.py index 18db5fa10a8f9..52c1e2008b8a0 100644 --- a/scripts/pandas_errors_documented.py +++ b/scripts/pandas_errors_documented.py @@ -1,5 +1,5 @@ """ -Check that doc/source/reference/general_utility_functions.rst documents +Check that doc/source/reference/testing.rst documents all exceptions and warnings in pandas/errors/__init__.py. This is meant to be run as a pre-commit hook - to run it manually, you can do: diff --git a/scripts/tests/test_validate_exception_location.py b/scripts/tests/test_validate_exception_location.py new file mode 100644 index 0000000000000..9d493ee04d1c2 --- /dev/null +++ b/scripts/tests/test_validate_exception_location.py @@ -0,0 +1,59 @@ +import pytest + +from scripts.validate_exception_location import ( + ERROR_MESSAGE, + validate_exception_and_warning_placement, +) + +PATH = "t.py" + +# ERRORS_IN_TESTING_RST is the set returned when parsing testing.rst for all the +# exceptions and warnings. +CUSTOM_EXCEPTION_NOT_IN_TESTING_RST = "MyException" +CUSTOM_EXCEPTION__IN_TESTING_RST = "MyOldException" +ERRORS_IN_TESTING_RST = {CUSTOM_EXCEPTION__IN_TESTING_RST} + +TEST_CODE = """ +import numpy as np +import sys + +def my_func(): + pass + +class {custom_name}({error_type}): + pass + +""" + + +# Test with various python-defined exceptions to ensure they are all flagged. 
[email protected](params=["Exception", "ValueError", "Warning", "UserWarning"]) +def error_type(request): + return request.param + + +def test_class_that_inherits_an_exception_and_is_not_in_the_testing_rst_is_flagged( + capsys, error_type +): + content = TEST_CODE.format( + custom_name=CUSTOM_EXCEPTION_NOT_IN_TESTING_RST, error_type=error_type + ) + expected_msg = ERROR_MESSAGE.format(errors=CUSTOM_EXCEPTION_NOT_IN_TESTING_RST) + with pytest.raises(SystemExit, match=None): + validate_exception_and_warning_placement(PATH, content, ERRORS_IN_TESTING_RST) + result_msg, _ = capsys.readouterr() + assert result_msg == expected_msg + + +def test_class_that_inherits_an_exception_but_is_in_the_testing_rst_is_not_flagged( + capsys, error_type +): + content = TEST_CODE.format( + custom_name=CUSTOM_EXCEPTION__IN_TESTING_RST, error_type=error_type + ) + validate_exception_and_warning_placement(PATH, content, ERRORS_IN_TESTING_RST) + + +def test_class_that_does_not_inherit_an_exception_is_not_flagged(capsys): + content = "class MyClass(NonExceptionClass): pass" + validate_exception_and_warning_placement(PATH, content, ERRORS_IN_TESTING_RST) diff --git a/scripts/validate_exception_location.py b/scripts/validate_exception_location.py new file mode 100644 index 0000000000000..ebbe6c95a3ec9 --- /dev/null +++ b/scripts/validate_exception_location.py @@ -0,0 +1,105 @@ +""" +Validate that the exceptions and warnings are in appropriate places. + +Checks for classes that inherit a python exception and warning and +flags them, unless they are exempted from checking. Exempt meaning +the exception/warning is defined in testing.rst. Testing.rst contains +a list of pandas defined exceptions and warnings. This list is kept +current by other pre-commit hook, pandas_errors_documented.py. +This hook maintains that errors.__init__.py and testing.rst are in-sync. +Therefore, the exception or warning should be defined or imported in +errors.__init__.py. Ideally, the exception or warning is defined unless +there's special reason to import it. + +Prints the exception/warning that do not follow this convention. + +Usage:: + +As a pre-commit hook: + pre-commit run validate-errors-locations --all-files +""" +from __future__ import annotations + +import argparse +import ast +import pathlib +import sys +from typing import Sequence + +API_PATH = pathlib.Path("doc/source/reference/testing.rst").resolve() +ERROR_MESSAGE = ( + "The following exception(s) and/or warning(s): {errors} exist(s) outside of " + "pandas/errors/__init__.py. Please either define them in " + "pandas/errors/__init__.py. Or, if not possible then import them in " + "pandas/errors/__init__.py.\n" +) + + +def get_warnings_and_exceptions_from_api_path() -> set[str]: + with open(API_PATH) as f: + doc_errors = { + line.split(".")[1].strip() for line in f.readlines() if "errors" in line + } + return doc_errors + + +class Visitor(ast.NodeVisitor): + def __init__(self, path: str, exception_set: set[str]) -> None: + self.path = path + self.exception_set = exception_set + self.found_exceptions = set() + + def visit_ClassDef(self, node) -> None: + def is_an_exception_subclass(base_id: str) -> bool: + return ( + base_id == "Exception" + or base_id.endswith("Warning") + or base_id.endswith("Error") + ) + + exception_classes = [] + + # Go through the class's bases and check if they are an Exception or Warning. 
+ for base in node.bases: + base_id = getattr(base, "id", None) + if base_id and is_an_exception_subclass(base_id): + exception_classes.append(base_id) + + # The class subclassed an Exception or Warning so add it to the list. + if exception_classes: + self.found_exceptions.add(node.name) + + +def validate_exception_and_warning_placement( + file_path: str, file_content: str, errors: set[str] +): + tree = ast.parse(file_content) + visitor = Visitor(file_path, errors) + visitor.visit(tree) + + misplaced_exceptions = visitor.found_exceptions.difference(errors) + + # If misplaced_exceptions isn't an empty list then there exists + # pandas-defined Exception or Warnings outside of pandas/errors/__init__.py, so + # we should flag them. + if misplaced_exceptions: + msg = ERROR_MESSAGE.format(errors=", ".join(misplaced_exceptions)) + sys.stdout.write(msg) + sys.exit(1) + + +def main(argv: Sequence[str] | None = None) -> None: + parser = argparse.ArgumentParser() + parser.add_argument("paths", nargs="*") + args = parser.parse_args(argv) + + error_set = get_warnings_and_exceptions_from_api_path() + + for path in args.paths: + with open(path, encoding="utf-8") as fd: + content = fd.read() + validate_exception_and_warning_placement(path, content, error_set) + + +if __name__ == "__main__": + main()
- [x] closes #27656. - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). This is a follow-up to my last PR: https://github.com/pandas-dev/pandas/pull/47901. I'm new to pre-commit hooks, so I may have gone in the wrong direction with this one. My approach is to scan the files for classes that inherit from an exception and flag them; let me know if another approach is preferred (a minimal sketch of the detection idea follows below). I also noticed some other exceptions while working on this. - InvalidVersion: I tried moving this one, but got a circular import, so I'm importing it in errors.__init__.py instead. - LossySetitemError, NoBufferPresent, and InvalidComparison are moved to errors.__init__.py. - NotThisMethod: it seems like a code-generated exception rather than a custom pandas one, so I'm importing it in errors.__init__.py instead.
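A minimal, self-contained sketch of the detection idea mentioned above (the allowlist literal here is hypothetical; the real hook builds it from `doc/source/reference/testing.rst`):

```python
# Walk a module's AST and flag classes that subclass a builtin
# exception/warning, unless they are on the documented allowlist.
import ast

ALLOWED = {"MyOldException"}  # hypothetical allowlist for illustration


def find_misplaced_exceptions(source: str) -> set:
    """Return names of classes whose bases look like exceptions/warnings."""
    found = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.ClassDef):
            for base in node.bases:
                base_id = getattr(base, "id", None)  # only simple Name bases
                if base_id and (
                    base_id == "Exception"
                    or base_id.endswith("Error")
                    or base_id.endswith("Warning")
                ):
                    found.add(node.name)
    return found - ALLOWED


print(find_misplaced_exceptions("class MyException(ValueError): pass"))
# {'MyException'}
```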
https://api.github.com/repos/pandas-dev/pandas/pulls/48088
2022-08-15T16:49:51Z
2022-09-20T18:31:49Z
2022-09-20T18:31:49Z
2022-10-13T16:59:59Z
ENH: Make categories setitem error more readable
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b50ddd42997cb..0d228582992d2 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1588,7 +1588,7 @@ def _validate_scalar(self, fill_value): raise TypeError( "Cannot setitem on a Categorical with a new " f"category ({fill_value}), set the categories first" - ) + ) from None return fill_value # -------------------------------------------------------------
- [X] closes #46646
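The entire change is the appended `from None`, which suppresses Python's implicit exception chaining. A small illustration of the effect with hypothetical values (this is not the pandas internals, just the same idiom):

```python
# Without ``from None``, the caught ValueError is printed as implicit
# context ("During handling of the above exception, another exception
# occurred: ..."); with it, only the readable TypeError surfaces.
def validate_scalar(fill_value, categories):
    try:
        return categories.index(fill_value)
    except ValueError:
        raise TypeError(
            "Cannot setitem on a Categorical with a new "
            f"category ({fill_value}), set the categories first"
        ) from None  # drop the noisy internal traceback context


validate_scalar("d", ["a", "b", "c"])  # raises only the TypeError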
https://api.github.com/repos/pandas-dev/pandas/pulls/48087
2022-08-15T15:46:16Z
2022-08-15T18:13:05Z
2022-08-15T18:13:05Z
2022-08-15T18:13:14Z
ENH: Add axis parameter to add_prefix and add_suffix
diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 83dfacb46784b..6b7ae767d79d4 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -28,7 +28,7 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ -- +- :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support an ``axis`` argument. If ``axis`` is set, the default behaviour of which axis to consider can be overwritten (:issue:`47819`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index abab32ae145bd..f91d15e1a6487 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4619,7 +4619,7 @@ def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) @final - def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: + def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. @@ -4630,6 +4630,10 @@ def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: ---------- prefix : str The string to add before each label. + axis : {{0 or 'index', 1 or 'columns', None}}, default None + Axis to add prefix on + + .. versionadded:: 1.6.0 Returns ------- @@ -4675,7 +4679,11 @@ def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: """ f = functools.partial("{prefix}{}".format, prefix=prefix) - mapper = {self._info_axis_name: f} + axis_name = self._info_axis_name + if axis is not None: + axis_name = self._get_axis_name(axis) + + mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type @@ -4683,7 +4691,7 @@ def add_prefix(self: NDFrameT, prefix: str) -> NDFrameT: return self._rename(**mapper) # type: ignore[return-value, arg-type] @final - def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT: + def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. @@ -4694,6 +4702,10 @@ def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT: ---------- suffix : str The string to add after each label. + axis : {{0 or 'index', 1 or 'columns', None}}, default None + Axis to add suffix on + + .. 
versionadded:: 1.6.0 Returns ------- @@ -4739,7 +4751,11 @@ def add_suffix(self: NDFrameT, suffix: str) -> NDFrameT: """ f = functools.partial("{}{suffix}".format, suffix=suffix) - mapper = {self._info_axis_name: f} + axis_name = self._info_axis_name + if axis is not None: + axis_name = self._get_axis_name(axis) + + mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type diff --git a/pandas/tests/frame/methods/test_add_prefix_suffix.py b/pandas/tests/frame/methods/test_add_prefix_suffix.py index ea75e9ff51552..92d7cdd7990e1 100644 --- a/pandas/tests/frame/methods/test_add_prefix_suffix.py +++ b/pandas/tests/frame/methods/test_add_prefix_suffix.py @@ -1,3 +1,5 @@ +import pytest + from pandas import Index import pandas._testing as tm @@ -18,3 +20,30 @@ def test_add_prefix_suffix(float_frame): with_pct_suffix = float_frame.add_suffix("%") expected = Index([f"{c}%" for c in float_frame.columns]) tm.assert_index_equal(with_pct_suffix.columns, expected) + + +def test_add_prefix_suffix_axis(float_frame): + # GH 47819 + with_prefix = float_frame.add_prefix("foo#", axis=0) + expected = Index([f"foo#{c}" for c in float_frame.index]) + tm.assert_index_equal(with_prefix.index, expected) + + with_prefix = float_frame.add_prefix("foo#", axis=1) + expected = Index([f"foo#{c}" for c in float_frame.columns]) + tm.assert_index_equal(with_prefix.columns, expected) + + with_pct_suffix = float_frame.add_suffix("#foo", axis=0) + expected = Index([f"{c}#foo" for c in float_frame.index]) + tm.assert_index_equal(with_pct_suffix.index, expected) + + with_pct_suffix = float_frame.add_suffix("#foo", axis=1) + expected = Index([f"{c}#foo" for c in float_frame.columns]) + tm.assert_index_equal(with_pct_suffix.columns, expected) + + +def test_add_prefix_suffix_invalid_axis(float_frame): + with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"): + float_frame.add_prefix("foo#", axis=2) + + with pytest.raises(ValueError, match="No axis named 2 for object type DataFrame"): + float_frame.add_suffix("foo#", axis=2) diff --git a/pandas/tests/series/methods/test_add_prefix_suffix.py b/pandas/tests/series/methods/test_add_prefix_suffix.py new file mode 100644 index 0000000000000..289a56b98b7e1 --- /dev/null +++ b/pandas/tests/series/methods/test_add_prefix_suffix.py @@ -0,0 +1,41 @@ +import pytest + +from pandas import Index +import pandas._testing as tm + + +def test_add_prefix_suffix(string_series): + with_prefix = string_series.add_prefix("foo#") + expected = Index([f"foo#{c}" for c in string_series.index]) + tm.assert_index_equal(with_prefix.index, expected) + + with_suffix = string_series.add_suffix("#foo") + expected = Index([f"{c}#foo" for c in string_series.index]) + tm.assert_index_equal(with_suffix.index, expected) + + with_pct_prefix = string_series.add_prefix("%") + expected = Index([f"%{c}" for c in string_series.index]) + tm.assert_index_equal(with_pct_prefix.index, expected) + + with_pct_suffix = string_series.add_suffix("%") + expected = Index([f"{c}%" for c in string_series.index]) + tm.assert_index_equal(with_pct_suffix.index, expected) + + +def test_add_prefix_suffix_axis(string_series): + # GH 47819 + with_prefix = string_series.add_prefix("foo#", axis=0) + expected = Index([f"foo#{c}" for c in string_series.index]) + tm.assert_index_equal(with_prefix.index, expected) + + with_pct_suffix = string_series.add_suffix("#foo", axis=0) + expected = Index([f"{c}#foo" 
for c in string_series.index]) + tm.assert_index_equal(with_pct_suffix.index, expected) + + +def test_add_prefix_suffix_invalid_axis(string_series): + with pytest.raises(ValueError, match="No axis named 1 for object type Series"): + string_series.add_prefix("foo#", axis=1) + + with pytest.raises(ValueError, match="No axis named 1 for object type Series"): + string_series.add_suffix("foo#", axis=1)
- [X] closes #47819 - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
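With this change applied, the new keyword behaves as in the tests above; a quick usage sketch:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

df.add_prefix("col_", axis=1)  # prefixes column labels: col_a, col_b
df.add_prefix("row_", axis=0)  # prefixes index labels: row_0, row_1

# A Series only has one axis, so axis=0 works and axis=1 raises
# "No axis named 1 for object type Series" (per the new tests):
pd.Series([1, 2]).add_suffix("_item", axis=0)
```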
https://api.github.com/repos/pandas-dev/pandas/pulls/48085
2022-08-15T12:18:17Z
2022-08-31T17:14:36Z
2022-08-31T17:14:36Z
2022-10-13T16:59:58Z
REF: avoid internals in merge code
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3e3ff68bb8719..50aaac211c7a5 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -83,7 +83,6 @@ import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.frame import _merge_doc -from pandas.core.internals import concatenate_managers from pandas.core.sorting import is_int64_overflow_possible if TYPE_CHECKING: @@ -716,28 +715,69 @@ def __init__( if validate is not None: self._validate(validate) - def get_result(self, copy: bool = True) -> DataFrame: - if self.indicator: - self.left, self.right = self._indicator_pre_merge(self.left, self.right) - - join_index, left_indexer, right_indexer = self._get_join_info() + def _reindex_and_concat( + self, + join_index: Index, + left_indexer: npt.NDArray[np.intp] | None, + right_indexer: npt.NDArray[np.intp] | None, + copy: bool, + ) -> DataFrame: + """ + reindex along index and concat along columns. + """ + # Take views so we do not alter the originals + left = self.left[:] + right = self.right[:] llabels, rlabels = _items_overlap_with_suffix( self.left._info_axis, self.right._info_axis, self.suffixes ) - lindexers = {1: left_indexer} if left_indexer is not None else {} - rindexers = {1: right_indexer} if right_indexer is not None else {} + if left_indexer is not None: + # Pinning the index here (and in the right code just below) is not + # necessary, but makes the `.take` more performant if we have e.g. + # a MultiIndex for left.index. + lmgr = left._mgr.reindex_indexer( + join_index, + left_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + left = left._constructor(lmgr) + left.index = join_index + + if right_indexer is not None: + rmgr = right._mgr.reindex_indexer( + join_index, + right_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + right = right._constructor(rmgr) + right.index = join_index - result_data = concatenate_managers( - [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], - axes=[llabels.append(rlabels), join_index], - concat_axis=0, - copy=copy, - ) + from pandas import concat - typ = self.left._constructor - result = typ(result_data).__finalize__(self, method=self._merge_type) + result = concat([left, right], axis=1, copy=copy) + result.columns = llabels.append(rlabels) + return result + + def get_result(self, copy: bool = True) -> DataFrame: + if self.indicator: + self.left, self.right = self._indicator_pre_merge(self.left, self.right) + + join_index, left_indexer, right_indexer = self._get_join_info() + + result = self._reindex_and_concat( + join_index, left_indexer, right_indexer, copy=copy + ) + result = result.__finalize__(self, method=self._merge_type) if self.indicator: result = self._indicator_post_merge(result) @@ -1725,19 +1765,9 @@ def get_result(self, copy: bool = True) -> DataFrame: left_join_indexer = left_indexer right_join_indexer = right_indexer - lindexers = {1: left_join_indexer} if left_join_indexer is not None else {} - rindexers = {1: right_join_indexer} if right_join_indexer is not None else {} - - result_data = concatenate_managers( - [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], - axes=[llabels.append(rlabels), join_index], - concat_axis=0, - copy=copy, + result = self._reindex_and_concat( + join_index, left_join_indexer, right_join_indexer, copy=copy ) - - typ = self.left._constructor - result = typ(result_data) - 
self._maybe_add_join_keys(result, left_indexer, right_indexer) return result
After this, `concatenate_managers` is only used in one place.
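For readers unfamiliar with the internals: the refactor swaps the direct `concatenate_managers` call for "reindex along index and concat along columns". A rough public-API sketch of that pattern (the real `_reindex_and_concat` works on block managers and handles suffixes; the helper and data below are illustrative):

```python
# Positional join indexers, with -1 marking "no match" -> an all-NA row.
import numpy as np
import pandas as pd


def take_rows(df, indexer, join_index):
    safe = np.where(indexer == -1, 0, indexer)  # placeholder rows for misses
    out = df.take(safe).set_axis(join_index)
    out[indexer == -1] = np.nan                 # blank out the placeholders
    return out


left = pd.DataFrame({"x": [10, 20]})
right = pd.DataFrame({"y": [200, 300]})
join_index = pd.RangeIndex(3)

result = pd.concat(
    [take_rows(left, np.array([0, 1, -1]), join_index),
     take_rows(right, np.array([-1, 0, 1]), join_index)],
    axis=1,
)
#       x      y
# 0  10.0    NaN
# 1  20.0  200.0
# 2   NaN  300.0
```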
https://api.github.com/repos/pandas-dev/pandas/pulls/48082
2022-08-15T01:53:35Z
2022-08-17T00:55:02Z
2022-08-17T00:55:02Z
2022-09-17T18:56:44Z
ENH: Add support for groupby.ohlc for ea dtypes
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index b71d294b97f9a..b042f37e71e38 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -287,6 +287,7 @@ Other enhancements - ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`) - :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, :class:`.IndexingError`, :class:`.PyperclipException`, :class:`.PyperclipWindowsException`, :class:`.CSSWarning`, :class:`.PossibleDataLossError`, :class:`.ClosedFileError`, :class:`.IncompatibilityWarning`, :class:`.AttributeConflictWarning`, :class:`.DatabaseError, :class:`.PossiblePrecisionLoss, :class:`.ValueLabelTypeMismatch, :class:`.InvalidColumnName, and :class:`.CategoricalConversionWarning` are now exposed in ``pandas.errors`` (:issue:`27656`) - Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`) +- Add support for :meth:`GroupBy.ohlc` for extension array dtypes (:issue:`37493`) - Allow reading compressed SAS files with :func:`read_sas` (e.g., ``.sas7bdat.gz`` files) - :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`) - :class:`Series` reducers (e.g. ``min``, ``max``, ``sum``, ``mean``) will now successfully operate when the dtype is numeric and ``numeric_only=True`` is provided; previously this would raise a ``NotImplementedError`` (:issue:`47500`) diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 3ec37718eb652..55662ff6c7494 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -86,11 +86,13 @@ def group_mean( result_mask: np.ndarray | None = ..., ) -> None: ... def group_ohlc( - out: np.ndarray, # floating[:, ::1] + out: np.ndarray, # floatingintuint_t[:, ::1] counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[floating, ndim=2] + values: np.ndarray, # ndarray[floatingintuint_t, ndim=2] labels: np.ndarray, # const intp_t[:] min_count: int = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., ) -> None: ... 
def group_quantile( out: npt.NDArray[np.float64], diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 6e2b79a320dd7..e8206df5b47f1 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -835,21 +835,32 @@ def group_mean( out[i, j] = sumx[i, j] / count +ctypedef fused int64float_t: + float32_t + float64_t + int64_t + uint64_t + + @cython.wraparound(False) @cython.boundscheck(False) def group_ohlc( - floating[:, ::1] out, + int64float_t[:, ::1] out, int64_t[::1] counts, - ndarray[floating, ndim=2] values, + ndarray[int64float_t, ndim=2] values, const intp_t[::1] labels, Py_ssize_t min_count=-1, + const uint8_t[:, ::1] mask=None, + uint8_t[:, ::1] result_mask=None, ) -> None: """ Only aggregates on axis=0 """ cdef: Py_ssize_t i, j, N, K, lab - floating val + int64float_t val + uint8_t[::1] first_element_set + bint isna_entry, uses_mask = not mask is None assert min_count == -1, "'min_count' only used in sum and prod" @@ -863,7 +874,15 @@ def group_ohlc( if K > 1: raise NotImplementedError("Argument 'values' must have only one dimension") - out[:] = np.nan + + if int64float_t is float32_t or int64float_t is float64_t: + out[:] = np.nan + else: + out[:] = 0 + + first_element_set = np.zeros((<object>counts).shape, dtype=np.uint8) + if uses_mask: + result_mask[:] = True with nogil: for i in range(N): @@ -873,11 +892,22 @@ def group_ohlc( counts[lab] += 1 val = values[i, 0] - if val != val: + + if uses_mask: + isna_entry = mask[i, 0] + elif int64float_t is float32_t or int64float_t is float64_t: + isna_entry = val != val + else: + isna_entry = False + + if isna_entry: continue - if out[lab, 0] != out[lab, 0]: + if not first_element_set[lab]: out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val + first_element_set[lab] = True + if uses_mask: + result_mask[lab] = False else: out[lab, 1] = max(out[lab, 1], val) out[lab, 2] = min(out[lab, 2], val) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index caea70e03b6f3..038e4afdbd767 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -157,6 +157,7 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: "first", "rank", "sum", + "ohlc", } _cython_arity = {"ohlc": 4} # OHLC @@ -219,13 +220,13 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: values = ensure_float64(values) elif values.dtype.kind in ["i", "u"]: - if how in ["var", "prod", "mean", "ohlc"] or ( + if how in ["var", "prod", "mean"] or ( self.kind == "transform" and self.has_dropped_na ): # result may still include NaN, so we have to cast values = ensure_float64(values) - elif how == "sum": + elif how in ["sum", "ohlc"]: # Avoid overflow during group op if values.dtype.kind == "i": values = ensure_int64(values) @@ -480,6 +481,9 @@ def _masked_ea_wrap_cython_operation( **kwargs, ) + if self.how == "ohlc": + result_mask = np.tile(result_mask, (4, 1)).T + # res_values should already have the correct dtype, we just need to # wrap in a MaskedArray return orig_values._maybe_mask_result(res_values, result_mask) @@ -592,6 +596,8 @@ def _call_cython_op( min_count=min_count, is_datetimelike=is_datetimelike, ) + elif self.how == "ohlc": + func(result, counts, values, comp_ids, min_count, mask, result_mask) else: func(result, counts, values, comp_ids, min_count) else: diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 54ee32502bbc9..bda4d0da9f6ce 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ 
b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -564,6 +564,22 @@ def test_order_aggregate_multiple_funcs(): tm.assert_index_equal(result, expected) +def test_ohlc_ea_dtypes(any_numeric_ea_dtype): + # GH#37493 + df = DataFrame( + {"a": [1, 1, 2, 3, 4, 4], "b": [22, 11, pd.NA, 10, 20, pd.NA]}, + dtype=any_numeric_ea_dtype, + ) + result = df.groupby("a").ohlc() + expected = DataFrame( + [[22, 22, 11, 11], [pd.NA] * 4, [10] * 4, [20] * 4], + columns=MultiIndex.from_product([["b"], ["open", "high", "low", "close"]]), + index=Index([1, 2, 3, 4], dtype=any_numeric_ea_dtype, name="a"), + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [np.int64, np.uint64]) @pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"]) def test_uint64_type_handling(dtype, how):
- [x] xref #37493 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This did not work at all before. cc @jorisvandenbossche
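The behaviour this enables, adapted from the new test (output abridged):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [22, 11, pd.NA]}, dtype="Int64")
df.groupby("a").ohlc()
#      b
#   open high  low close
# a
# 1   22   22   11    11
# 2 <NA> <NA> <NA>  <NA>
```

Note that the result keeps the nullable `Int64` dtype, with the all-NA group carried through the new `result_mask` rather than being cast to float.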
https://api.github.com/repos/pandas-dev/pandas/pulls/48081
2022-08-14T20:10:41Z
2022-08-15T17:47:37Z
2022-08-15T17:47:37Z
2022-08-15T17:48:04Z
PDEP-3: Small data visualization consolidation
diff --git a/web/pandas/pdeps/0003-small-data-visualization-consolidation.md b/web/pandas/pdeps/0003-small-data-visualization-consolidation.md new file mode 100644 index 0000000000000..33bc815c42ace --- /dev/null +++ b/web/pandas/pdeps/0003-small-data-visualization-consolidation.md @@ -0,0 +1,158 @@ +# PDEP-3: Small Data Visualization Consolidation + +- Created: 13 August 2022 +- Status: Draft +- Discussion: [#48080](https://github.com/pandas-dev/pandas/pull/48080) +- Author: [JHM Darbyshire](https://github.com/attack68) +- Revision: 1 + +## Abstract + +Developing consistent and flexible output methods for small data, +which are well documented and decoupled from `DataFrame` methods. + +## Motivation and Scope + +Large data is not in scope for this PDEP. Large Data is classed as that which +would not be expected to print, or be wire-transferred for example, with LaTeX, +HTML or JSON. + +An important part of data analysis is data exploration and data visualization. +`Styler` already exists as a part of Pandas, and whose original purpose was +for conditional formatting for data cells in HTML. A rough timeline of +activity of Styler development has been: + - 2015: released as conditional HTML formatting + - 2015-2020: expanded CSS functionality and added built in conditional + formatting functions. + - 2020: performance enhancements, + - 2021: expanded built in methods, expanded formatting functions, + performance enhancements, added LaTeX as an alternative to HTML, + - 2022: added concat to combine the output of multiple Stylers, documentation + and consistency improvements. + +With the feature enhancements in 2021/22 coupled with performance improvements +it is appropriate to re-target the Styler away from an HTML conditional +formatting only tool. + +The overall design philosophy for Styler is proposed as: + + - i) Function as a decoupled display formatting tool. + - ii) Provide output methods for small data within a consistent API framework. + - iii) Provide enhanced formatting techniques such as hiding, color, font and border control to output methods that support it, and document these. + - iv) Should be the only displaying rendering method for HTML and LaTeX. + +## Detailed Description + +*Styler Philosophy - i) Decoupled display formatter:* + +This involves creating output without altering, or the need to pre-alter any of +the in memory data of the DataFrame. +This serves the purpose of maintaining high performance DataFrame indexing +and data features without penalising display. + +```python +>>> df = DataFrame({"data": range(100)}, + columns=date_range("2022-07-01", periods=100)) +>>> df.loc["2022-08-10": "2022-08-14"].style.format_index(lambda v: v.strftime("%a")) + data +Wed -0.245266 +Thu 0.123456 +Fri 1.123456 +Sat -1.124578 +Sun 0.999888 +``` + +It also allows concatenation of display without the need to concatenate DataFrames +and avoids mixing column dtypes. + +```python +>>> df = DataFrame({"norm": np.random.randn(10000), + "poisson": np.random.poisson(1, 10000)}) +>>> df_summary = df.agg(["mean"]) +>>> pd.options.styler.render.max_rows = 5 +>>> df.style.concat(df_summary.style) + norm poisson +0 -0.111111 0 +1 0.111111 1 +2 1.234567 0 +3 -2.666777 1 +4 0.443355 2 +... ... ... +mean 0.001122 0.999999 +``` + +This design choice allows it to be separated from core functions, +permitting a forked component in the future if ever necessary. 
+ +*Styler Philosophy - ii) Output methods for small data* + +"Within a consistent framework" means that the method chaining construct +should be applicable to all output methods, for example the following should +all experience similar behaviour (as well as others): + + - `styler.hide(*args).format(*args).to_latex()` + - `styler.hide(*args).format(*args).to_html()` + - `styler.hide(*args).format(*args).to_json()` + +Any exceptions to this rule are documented and transparent (for example Excel +cannot currently implement either `hide` or `format`. This allows for generalised +styler construction with the output method determinable at render time. + +The full list of proposed output methods from Styler is +- `to_html`, (implemented with jinja2 templates: fully functional) +- `to_latex`, (implemented with jinja2 templates: fully functional) +- `to_json`, (not implemented) +- `to_string`, (implemented with jinja2 templates: basic initial commit version, + needs extension for appropriate console printing) +- `to_csv`, (not implemented, albeit indirectly available via to_string) +- `to_excel`, (available via augmenting DataFrame.to_excel: this is not decoupled) + +*Styler Philosophy -iii) Enhanced formatting and documentation* + +For the most visual of outputs, HTML and LaTeX the suite of functionality +predominantly exists. For excel progress has been towards unification +but still some features are incompatible. + +Documentation development is an important aspect here to unify all +methods and give user examples. + +*Styler Philosophy -iv) Only renderer for HTML and LaTeX* + +DataFrame LatexFormatter and HTMLFormatter exist for legacy implementations. +They have comparatively fewer features, are less performant by some metrics, +and in the case of HTML contain deprecated HTML, and potentially non-CSP valid +output (e.g. inline styles). The proposal is to keep `DataFrame.to_html` and +`DataFrame.to_latex` methods but redesign their arguments signature with a +view to being more consistent with the arguments of Styler, and create output +via the Styler implementation, thus with a requirement for `jinja2`. + +## Usage and Impact + +It is expected that these tools could feasibly be used by any user. +Small data users may have requirements to use the tools directly, +whilst big data users will often create summary tables to explore and +examine the data where this would otherwise be useful. + +Providing consistent functionality across outputs and well +documented formatting features will add to the overall appeal +of the pandas package and promote its longevity, as a "single, +fully featured package". + +## Implementation + +A number of release notes, 1.1, 1.2, 1.3, 1.4, have already documented +development towards these objectives, + +The required implementation is to: + + - advance the outstanding output methods that are not yet implemented, + or partly implemented, or are partly conforming to the philosophy + (e.g. Styler.to_excel) + - synchronise the mentioned DataFrame output methods to utilise the + Styler implementations, and alter any keyword arguments. + - revise and restructure the documentation to present Styler as a + holistic output formatter with superior examples of relevant formats. + +### PDEP History + +- 13 August 2022: Initial draft
This is the official PDEP with the design objectives I have been unofficially publishing PRs towards. Some items, like reimplementing `DataFrame.to_html` and `DataFrame.to_latex` to use `jinja2`, have suffered from ad-hoc and inconsistent PRs and PR reviews. It seems appropriate to hold a wider discussion and either adopt or reject these concepts.
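To make the "consistent API framework" concrete, this is the kind of chained construction the proposal standardises, with the output format deferred to render time (a sketch; `to_json` is listed above as not yet implemented):

```python
import pandas as pd

df = pd.DataFrame({"A": [1.23456, 2.34567], "B": [3.0, 4.0]})

# One Styler spec, multiple render targets chosen at the end:
styler = df.style.hide(axis="index").format("{:.2f}")
html = styler.to_html()
latex = styler.to_latex()
```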
https://api.github.com/repos/pandas-dev/pandas/pulls/48080
2022-08-14T13:29:20Z
2023-03-24T17:46:44Z
null
2023-03-24T17:46:45Z
ENH: Support mask for groupby var and mean
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 2de1f25fceace..90cb31577a1b4 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -560,6 +560,45 @@ def time_frame_agg(self, dtype, method): self.df.groupby("key").agg(method) +class GroupByCythonAggEaDtypes: + """ + Benchmarks specifically targeting our cython aggregation algorithms + (using a big enough dataframe with simple key, so a large part of the + time is actually spent in the grouped aggregation). + """ + + param_names = ["dtype", "method"] + params = [ + ["Float64", "Int64", "Int32"], + [ + "sum", + "prod", + "min", + "max", + "mean", + "median", + "var", + "first", + "last", + "any", + "all", + ], + ] + + def setup(self, dtype, method): + N = 1_000_000 + df = DataFrame( + np.random.randint(0, high=100, size=(N, 10)), + columns=list("abcdefghij"), + dtype=dtype, + ) + df["key"] = np.random.randint(0, 100, size=N) + self.df = df + + def time_frame_agg(self, dtype, method): + self.df.groupby("key").agg(method) + + class Cumulative: param_names = ["dtype", "method"] params = [ diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 4b7f136be3c16..6ea2d02924e65 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -100,6 +100,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ +- Performance improvement in :meth:`.GroupBy.mean` and :meth:`.GroupBy.var` for extension array dtypes (:issue:`37493`) - Performance improvement for :meth:`MultiIndex.unique` (:issue:`48335`) - diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 04db0c9b90bc5..8722092809ed9 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -78,6 +78,8 @@ def group_var( labels: np.ndarray, # const intp_t[:] min_count: int = ..., # Py_ssize_t ddof: int = ..., # int64_t + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., ) -> None: ... def group_mean( out: np.ndarray, # floating[:, ::1] diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 299dfdf177d91..0c368f9421932 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -759,6 +759,8 @@ def group_var( const intp_t[::1] labels, Py_ssize_t min_count=-1, int64_t ddof=1, + const uint8_t[:, ::1] mask=None, + uint8_t[:, ::1] result_mask=None, ) -> None: cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) @@ -766,6 +768,7 @@ def group_var( floating[:, ::1] mean int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) + bint isna_entry, uses_mask = not mask is None assert min_count == -1, "'min_count' only used in sum and prod" @@ -790,8 +793,12 @@ def group_var( for j in range(K): val = values[i, j] - # not nan - if val == val: + if uses_mask: + isna_entry = mask[i, j] + else: + isna_entry = not val == val + + if not isna_entry: nobs[lab, j] += 1 oldmean = mean[lab, j] mean[lab, j] += (val - oldmean) / nobs[lab, j] @@ -801,7 +808,10 @@ def group_var( for j in range(K): ct = nobs[i, j] if ct <= ddof: - out[i, j] = NAN + if uses_mask: + result_mask[i, j] = True + else: + out[i, j] = NAN else: out[i, j] /= (ct - ddof) @@ -839,9 +849,9 @@ def group_mean( is_datetimelike : bool True if `values` contains datetime-like entries. mask : ndarray[bool, ndim=2], optional - Not used. + Mask of the input values. result_mask : ndarray[bool, ndim=2], optional - Not used. 
+ Mask of the out array Notes ----- @@ -855,6 +865,7 @@ def group_mean( mean_t[:, ::1] sumx, compensation int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) + bint isna_entry, uses_mask = not mask is None assert min_count == -1, "'min_count' only used in sum and prod" @@ -867,7 +878,12 @@ def group_mean( compensation = np.zeros((<object>out).shape, dtype=(<object>out).base.dtype) N, K = (<object>values).shape - nan_val = NPY_NAT if is_datetimelike else NAN + if uses_mask: + nan_val = 0 + elif is_datetimelike: + nan_val = NPY_NAT + else: + nan_val = NAN with nogil: for i in range(N): @@ -878,8 +894,15 @@ def group_mean( counts[lab] += 1 for j in range(K): val = values[i, j] - # not nan - if val == val and not (is_datetimelike and val == NPY_NAT): + + if uses_mask: + isna_entry = mask[i, j] + elif is_datetimelike: + isna_entry = val == NPY_NAT + else: + isna_entry = not val == val + + if not isna_entry: nobs[lab, j] += 1 y = val - compensation[lab, j] t = sumx[lab, j] + y @@ -890,7 +913,12 @@ def group_mean( for j in range(K): count = nobs[i, j] if nobs[i, j] == 0: - out[i, j] = nan_val + + if uses_mask: + result_mask[i, j] = True + else: + out[i, j] = nan_val + else: out[i, j] = sumx[i, j] / count diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 418a222a0bfa6..c118c7f16af8f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -160,6 +160,8 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: "ohlc", "cumsum", "prod", + "mean", + "var", } _cython_arity = {"ohlc": 4} # OHLC @@ -598,7 +600,7 @@ def _call_cython_op( min_count=min_count, is_datetimelike=is_datetimelike, ) - elif self.how in ["ohlc", "prod"]: + elif self.how in ["var", "ohlc", "prod"]: func( result, counts, @@ -607,9 +609,10 @@ def _call_cython_op( min_count=min_count, mask=mask, result_mask=result_mask, + **kwargs, ) else: - func(result, counts, values, comp_ids, min_count, **kwargs) + func(result, counts, values, comp_ids, min_count) else: # TODO: min_count if self.uses_mask():
- [x] xref #37493 - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This saves around 30-35% in the aggregation operation for nullable dtypes with 1/3 missing values, compared to the previous implementation. cc @jorisvandenbossche
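The `group_var` kernel above is Welford's online algorithm, with NA-ness taken from the mask instead of a `val != val` check. A pure-Python sketch of the masked update, simplified to 1-D (names are mine, not the Cython signature):

```python
import numpy as np


def masked_group_var(values, mask, labels, ngroups, ddof=1):
    out = np.zeros(ngroups)
    mean = np.zeros(ngroups)
    nobs = np.zeros(ngroups, dtype=np.int64)
    result_mask = np.zeros(ngroups, dtype=bool)

    for val, isna, lab in zip(values, mask, labels):
        if isna:
            continue  # NA-ness comes from the mask, not val != val
        nobs[lab] += 1
        oldmean = mean[lab]
        mean[lab] += (val - oldmean) / nobs[lab]        # running mean
        out[lab] += (val - mean[lab]) * (val - oldmean)  # running M2

    for i in range(ngroups):
        if nobs[i] <= ddof:
            result_mask[i] = True  # flag NA instead of writing NaN
        else:
            out[i] /= nobs[i] - ddof
    return out, result_mask
```

Flagging `result_mask` rather than writing `NaN` is what lets the nullable result show `<NA>` without forcing a float buffer.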
https://api.github.com/repos/pandas-dev/pandas/pulls/48078
2022-08-14T10:11:09Z
2022-09-02T19:48:13Z
2022-09-02T19:48:13Z
2022-10-13T16:59:57Z
Bug: Unexpected behavior when assigning multi-dimensional array to DataFrame column
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 93a2c20cd0b74..60cef111af2b0 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3948,6 +3948,9 @@ def isetitem(self, loc, value) -> None: `frame[frame.columns[i]] = value`. """ arraylike = self._sanitize_column(value) + loc_num = len(loc) if isinstance(loc, list) else 1 + if value.shape[1] > loc_num: + raise ValueError("could not broadcast input array to dataframe") self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): @@ -4182,6 +4185,15 @@ def _set_item(self, key, value) -> None: if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T + if key in self: + num_col_value = 1 if len(value.shape) == 1 else value.shape[1] + num_col_self = 1 if len(self[key].shape) == 1 else self[key].shape[1] + + if num_col_value > num_col_self: + raise ValueError( + "Shape of new values must be compatible with manager shape" + ) + self._set_item_mgr(key, value) def _set_value( diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index c030647297b9e..6ceb99f6a2574 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1891,12 +1891,14 @@ def _setitem_with_indexer_2d_value(self, indexer, value): pi = indexer[0] ilocs = self._ensure_iterable_column_indexer(indexer[1]) - # GH#7551 Note that this coerces the dtype if we are mixed value = np.array(value, dtype=object) if len(ilocs) != value.shape[1]: raise ValueError( - "Must have equal len keys and value when setting with an ndarray" + "shape mismatch: value array of shape ({},{}) could not be broadcast " + "to indexing result of shape ({},{})".format( + value.shape[0], value.shape[1], len(self.obj), 2 + ) ) for i, loc in enumerate(ilocs): diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index dcf69dfda1ae8..6f5b78c2d14ab 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -846,7 +846,8 @@ def iset( # TODO can we avoid needing to unpack this here? That means converting # DataFrame into 1D array when loc is an integer if isinstance(value, np.ndarray) and value.ndim == 2: - assert value.shape[1] == 1 + if value.shape[1] != 1: + raise ValueError("could not broadcast input array to dataframe") value = value[:, 0] # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 3084bcea49f05..c462c30b04cf4 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1175,9 +1175,7 @@ def iset( value = ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: - raise AssertionError( - "Shape of new values must be compatible with manager shape" - ) + raise ValueError("could not broadcast input array to dataframe") if lib.is_integer(loc): # We have 6 tests where loc is _not_ an int. 
diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 6d2becd7a32d2..5c77bce9374c2 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -750,6 +750,40 @@ def test_setitem_frame_overwrite_with_ea_dtype(self, any_numeric_ea_dtype): ) tm.assert_frame_equal(df, expected) + @pytest.mark.parametrize( + "rhs_values, rhs_error_values", + [ + ( + np.array([[5], [5], [5]], dtype="int64"), + np.array([[5, 6, 7], [5, 6, 7], [5, 6, 7]], dtype="int64"), + ), + ( + DataFrame([[5], [5], [5]], columns=["foo"]), + DataFrame( + [[5, 6, 7], [5, 6, 7], [5, 6, 7]], columns=["foo", "foo", "foo"] + ), + ), + ], + ) + def test_isetitem_incompatible_array(self, rhs_values, rhs_error_values): + # GH#40827 + df = DataFrame( + [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + + result = df.copy() + result.isetitem(0, rhs_values) + expected = DataFrame( + [[5, 2, 3, 4], [5, 2, 3, 4], [5, 2, 3, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + tm.assert_frame_equal(result, expected) + + msg = "could not broadcast input array to dataframe" + with pytest.raises(ValueError, match=msg): + result.isetitem(0, rhs_error_values) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 8cc6b6e73aaea..56851db04582a 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -1230,6 +1230,40 @@ def test_iloc_setitem_multicolumn_to_datetime(self, using_array_manager): ) tm.assert_frame_equal(df, expected, check_dtype=using_array_manager) + @pytest.mark.parametrize( + "rhs_values, rhs_error_values", + [ + ( + np.array([[5, 6], [5, 6], [5, 6]], dtype="int64"), + np.array([[5, 6, 7], [5, 6, 7], [5, 6, 7]], dtype="int64"), + ), + ( + DataFrame([[5, 6], [5, 6], [5, 6]], columns=["foo", "foo"]), + DataFrame( + [[5, 6, 7], [5, 6, 7], [5, 6, 7]], columns=["foo", "foo", "foo"] + ), + ), + ], + ) + def test_iloc_setitem_incompatible_array(self, rhs_values, rhs_error_values): + # GH#40827 + df = DataFrame( + [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + result = df.copy() + result.iloc[:, [0, 2]] = rhs_values + + expected = DataFrame( + [[5, 2, 6, 4], [5, 2, 6, 4], [5, 2, 6, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + tm.assert_frame_equal(result, expected) + + msg = "cannot reindex on an axis with duplicate labels" + with pytest.raises(ValueError, match=msg): + df.loc[:, [0, 2]] = rhs_error_values + class TestILocErrors: # NB: this test should work for _any_ Series we can pass as diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index cf7db65015fa7..58864f20399aa 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -222,6 +222,45 @@ def test_loc_getitem_single_boolean_arg(self, obj, key, exp): else: assert res == exp + @pytest.mark.parametrize( + "rhs_values, rhs_error_values, msg", + [ + ( + np.array([[5, 6], [5, 6], [5, 6]], dtype="int64"), + np.array([[5, 6, 7], [5, 6, 7], [5, 6, 7]], dtype="int64"), + "|".join( + [ + r"shape mismatch: value array of shape \(3,3\)", + r"could not be broadcast to indexing result of shape \(3,2\)", + ] + ), + ), + ( + DataFrame([[5, 6], [5, 6], [5, 6]], columns=["foo", "foo"]), + DataFrame( + [[5, 6, 7], [5, 6, 7], [5, 6, 7]], columns=["foo", "foo", "foo"] + ), + "cannot reindex on an axis with duplicate labels", + 
), + ], + ) + def test_loc_setitem_incompatible_array(self, rhs_values, rhs_error_values, msg): + # GH#40827 + df = DataFrame( + [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + result = df.copy() + result.loc[:, ["foo"]] = rhs_values + expected = DataFrame( + [[5, 2, 6, 4], [5, 2, 6, 4], [5, 2, 6, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + tm.assert_frame_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + df.loc[:, ["foo"]] = rhs_error_values + class TestLocBaseIndependent: # Tests for loc that do not depend on subclassing Base diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 90051405c6935..98f6ffb54c63b 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1660,3 +1660,38 @@ def test_setitem_empty_mask_dont_upcast_dt64(): ser.mask(mask, "foo", inplace=True) assert ser.dtype == dti.dtype # no-op -> dont upcast tm.assert_series_equal(ser, orig) + + [email protected]( + "rhs_values, rhs_error_values, msg", + [ + ( + np.array([[5, 6], [5, 6], [5, 6]], dtype="int64"), + np.array([[5, 6, 7], [5, 6, 7], [5, 6, 7]], dtype="int64"), + "Shape of new values must be compatible with manager shape", + ), + ( + DataFrame([[5, 6], [5, 6], [5, 6]], columns=["foo", "foo"]), + DataFrame([[5, 6, 7], [5, 6, 7], [5, 6, 7]], columns=["foo", "foo", "foo"]), + "Columns must be same length as key", + ), + ], +) +def test_regular_setitem_incompatible_array(rhs_values, rhs_error_values, msg): + # GH#40827 + df = DataFrame( + [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + + result = df.copy() + result["foo"] = rhs_values + + expected = DataFrame( + [[5, 2, 6, 4], [5, 2, 6, 4], [5, 2, 6, 4]], + columns=["foo", "bar", "foo", "hello"], + ) + tm.assert_frame_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + df["foo"] = rhs_error_values
- [ ] closes #40827 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
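What the fix changes in user terms, per the new tests (behaviour with this patch applied):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    [[1, 2, 3, 4]] * 3, columns=["foo", "bar", "foo", "hello"]
)

# Two target columns, two value columns: fine.
df.iloc[:, [0, 2]] = np.array([[5, 6]] * 3)

# Three value columns for two targets: now a ValueError
# ("shape mismatch: value array of shape (3,3) could not be
# broadcast to indexing result of shape (3,2)") instead of an
# internal AssertionError or silent misbehaviour.
df.iloc[:, [0, 2]] = np.array([[5, 6, 7]] * 3)
```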
https://api.github.com/repos/pandas-dev/pandas/pulls/48077
2022-08-14T07:24:35Z
2022-08-25T16:01:53Z
null
2022-08-25T16:01:53Z
Backport PR #48065 on branch 1.4.x (CI/DEPS: Fix timezone test due to pytz upgrade)
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py index 50c5a91fc2390..9c6d6a686e9a5 100644 --- a/pandas/tests/tseries/offsets/test_dst.py +++ b/pandas/tests/tseries/offsets/test_dst.py @@ -31,6 +31,10 @@ ) from pandas.tests.tseries.offsets.test_offsets import get_utc_offset_hours +from pandas.util.version import Version + +# error: Module has no attribute "__version__" +pytz_version = Version(pytz.__version__) # type: ignore[attr-defined] class TestDST: @@ -186,9 +190,8 @@ def test_all_offset_classes(self, tup): MonthBegin(66), "Africa/Kinshasa", marks=pytest.mark.xfail( - # error: Module has no attribute "__version__" - float(pytz.__version__) <= 2020.1, # type: ignore[attr-defined] - reason="GH#41906", + pytz_version < Version("2020.5") or pytz_version == Version("2022.2"), + reason="GH#41906: pytz utc transition dates changed", ), ), (
Backport PR #48065: CI/DEPS: Fix timezone test due to pytz upgrade
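The fix replaces a `float(pytz.__version__)` comparison, which breaks on versions like "2022.2.1", with proper version parsing. The gating pattern in isolation (test body is a placeholder):

```python
import pytest
import pytz

from pandas.util.version import Version

pytz_version = Version(pytz.__version__)


@pytest.mark.xfail(
    pytz_version < Version("2020.5") or pytz_version == Version("2022.2"),
    reason="GH#41906: pytz utc transition dates changed",
)
def test_dst_transition():
    ...
```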
https://api.github.com/repos/pandas-dev/pandas/pulls/48076
2022-08-14T05:39:14Z
2022-08-14T09:43:11Z
2022-08-14T09:43:11Z
2022-08-14T09:43:11Z
TST: Address test warnings
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 8a261f09e7118..f1a8a70674582 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -973,7 +973,7 @@ def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: result: dict[Hashable, NDFrame | np.ndarray] = {} if self.axis == 0: # test_pass_args_kwargs_duplicate_columns gets here with non-unique columns - for name, data in self: + for name, data in self.grouper.get_iterator(obj, self.axis): fres = func(data, *args, **kwargs) result[name] = fres else: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 138474e21fb57..67f645c008ac3 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -828,7 +828,7 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: ( "In a future version of pandas, a length 1 " "tuple will be returned when iterating over a " - "a groupby with a grouper equal to a list of " + "groupby with a grouper equal to a list of " "length 1. Don't supply a list with a single grouper " "to avoid this warning." ), diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 2890b7930611c..0b2fb56a02006 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -214,6 +214,7 @@ def func_kwargs(values, index): @td.skip_if_no("numba") [email protected]("ignore") def test_multiindex_one_key(nogil, parallel, nopython): def numba_func(values, index): return 1 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index d290aada18293..a63e0df25b160 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2801,7 +2801,7 @@ def test_single_element_list_grouping(): ) msg = ( "In a future version of pandas, a length 1 " - "tuple will be returned when iterating over a " + "tuple will be returned when iterating over " "a groupby with a grouper equal to a list of " "length 1. Don't supply a list with a single grouper " "to avoid this warning." 
diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index 0e26cdc294b55..2b70d7325a209 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -202,6 +202,7 @@ def func_kwargs(values, index): @td.skip_if_no("numba") [email protected]("ignore") def test_multiindex_one_key(nogil, parallel, nopython): def numba_func(values, index): return 1 diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 4ca34bec0a7d9..fa1d6bbfd5a7e 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -89,6 +89,7 @@ def _transfer_marks(engine, read_ext): for ext in read_ext_params if _is_valid_engine_ext_pair(eng, ext) ], + ids=str, ) def engine_and_read_ext(request): """ @@ -654,6 +655,7 @@ def test_read_excel_blank_with_header(self, read_ext): actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1") tm.assert_frame_equal(actual, expected) + @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl") def test_date_conversion_overflow(self, request, engine, read_ext): # GH 10001 : pandas.ExcelFile ignore parse_dates=False if engine == "pyxlsb": diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index ba6366b71d854..977f278b7d04c 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -878,7 +878,7 @@ def test_to_excel_output_encoding(self, ext): ) with tm.ensure_clean("__tmp_to_excel_float_format__." + ext) as filename: - df.to_excel(filename, sheet_name="TestSheet", encoding="utf8") + df.to_excel(filename, sheet_name="TestSheet") result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0) tm.assert_frame_equal(result, df) @@ -1282,6 +1282,7 @@ def test_deprecated_attr(self, engine, ext, attr): # Some engines raise if nothing is written DataFrame().to_excel(writer) + @pytest.mark.filterwarnings("ignore:Calling close():UserWarning:xlsxwriter") @pytest.mark.parametrize( "attr, args", [("save", ()), ("write_cells", ([], "test"))] ) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index d97c6a3dacdc3..6e87c221426c1 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -737,7 +737,9 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix left_grouped = left.groupby(join_col) right_grouped = right.groupby(join_col) - for group_key, group in result.groupby(join_col): + for group_key, group in result.groupby( + join_col if len(join_col) > 1 else join_col[0] + ): l_joined = _restrict_to_columns(group, left.columns, lsuffix) r_joined = _restrict_to_columns(group, right.columns, rsuffix)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). The goal is to enable `pytest -W error:::pandas`, so that warnings issued by pandas itself are raised as errors in our test suite.
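The two mechanisms at work here, sketched side by side (the openpyxl filter is copied from the diff; the test body is a placeholder):

```python
import pytest


# Silence a known third-party warning for one test, scoped by message,
# category, and origin module, so it does not trip the strict run:
@pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl")
def test_reads_quirky_file():
    ...


# The stated goal, as a command line:
#   pytest -W error:::pandas
# turns any warning whose origin module matches ``pandas`` into an error.
```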
https://api.github.com/repos/pandas-dev/pandas/pulls/48075
2022-08-14T04:51:56Z
2022-08-19T21:15:02Z
2022-08-19T21:15:02Z
2022-08-19T21:41:04Z
DEPR: `sort_columns` in `plot` (#47563)
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index b71d294b97f9a..7fe970ef36e14 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -845,6 +845,7 @@ Other Deprecations - Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`) - Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) - Fixed up warning message of deprecation of :meth:`MultiIndex.lesort_depth` as public method, as the message previously referred to :meth:`MultiIndex.is_lexsorted` instead (:issue:`38701`) +- Deprecated the ``sort_columns`` argument in :meth:`DataFrame.plot` and :meth:`Series.plot` (:issue:`47563`). .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 0d69a52eb15f1..96f9abb301471 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1,11 +1,14 @@ from __future__ import annotations import importlib +import inspect +import itertools import types from typing import ( TYPE_CHECKING, Sequence, ) +import warnings from pandas._config import get_option @@ -14,6 +17,7 @@ Appender, Substitution, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_integer, @@ -755,6 +759,11 @@ class PlotAccessor(PandasObject): If True, create stacked plot. sort_columns : bool, default False Sort column names to determine plot ordering. + + .. deprecated:: 1.5.0 + The `sort_columns` arguments is deprecated and will be removed in a + future version. + secondary_y : bool or sequence, default False Whether to plot on the secondary y-axis if a list/tuple, which columns to plot on secondary y-axis. @@ -875,6 +884,14 @@ def _get_call_args(backend_name, data, args, kwargs): "expected Series or DataFrame" ) + if "sort_columns" in itertools.chain(args, kwargs.keys()): + warnings.warn( + "`sort_columns` is deprecated and will be removed in a future " + "version.", + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) + if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = ", ".join( diff --git a/pandas/tests/plotting/frame/test_frame.py b/pandas/tests/plotting/frame/test_frame.py index 538c9c2fb5059..b38c9adb0a893 100644 --- a/pandas/tests/plotting/frame/test_frame.py +++ b/pandas/tests/plotting/frame/test_frame.py @@ -2215,6 +2215,19 @@ def test_secondary_y(self, secondary_y): assert ax.get_ylim() == (0, 100) assert ax.get_yticks()[0] == 99 + def test_sort_columns_deprecated(self): + # GH 47563 + df = DataFrame({"a": [1, 2], "b": [3, 4]}) + + with tm.assert_produces_warning(FutureWarning): + df.plot.box("a", sort_columns=True) + + with tm.assert_produces_warning(FutureWarning): + df.plot.box(sort_columns=False) + + with tm.assert_produces_warning(False): + df.plot.box("a") + def _generate_4_axes_via_gridspec(): import matplotlib as mpl
- [x] closes #47563 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v1.5.0.rst` file if fixing a bug or adding a new feature.
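For illustration, a hedged sketch of the user-facing effect of this deprecation (requires a matplotlib backend; the exact warning message may differ):

```python
import warnings

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# After this change, passing `sort_columns` triggers a FutureWarning
# via PlotAccessor._get_call_args.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df.plot(sort_columns=True)

assert any(issubclass(w.category, FutureWarning) for w in caught)
```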
https://api.github.com/repos/pandas-dev/pandas/pulls/48073
2022-08-13T20:28:00Z
2022-08-16T00:07:00Z
2022-08-16T00:07:00Z
2022-08-20T12:55:22Z
TST: Check dtype in test_resample_empty_dataframe
diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 1a25749808820..403eeb8db73f9 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -169,13 +169,13 @@ def test_resample_empty_dataframe(empty_frame_dti, freq, resample_method): expected = df.copy() else: # GH14962 - expected = Series([], dtype=object) + expected = Series([], dtype=int) expected.index = _asfreq_compat(df.index, freq) tm.assert_index_equal(result.index, expected.index) assert result.index.freq == expected.index.freq - tm.assert_almost_equal(result, expected, check_dtype=False) + tm.assert_almost_equal(result, expected) # test size for GH13212 (currently stays as df)
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Noticed from https://github.com/pandas-dev/pandas/pull/47672#discussion_r945175211
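For context, a small sketch of the behavior the tightened assertion pins down (an assumption about pandas ≥ 1.5 development behavior, not verified here):

```python
import pandas as pd

# Resampling an empty integer frame with a reducer should keep an integer
# result dtype rather than falling back to object.
idx = pd.date_range("2000-01-01", periods=0, freq="D")
df = pd.DataFrame({"a": []}, index=idx, dtype=int)

result = df.resample("M").sum()
print(result.dtypes)  # a    int64
```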
https://api.github.com/repos/pandas-dev/pandas/pulls/48072
2022-08-13T18:40:41Z
2022-08-15T17:27:58Z
2022-08-15T17:27:58Z
2022-08-15T19:43:09Z
ENH: Support mask in GroupBy.cumsum
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0ceac8aeb9db8..bc5f24e6fb7dd 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -287,7 +287,7 @@ Other enhancements - ``times`` argument in :class:`.ExponentialMovingWindow` now accepts ``np.timedelta64`` (:issue:`47003`) - :class:`.DataError`, :class:`.SpecificationError`, :class:`.SettingWithCopyError`, :class:`.SettingWithCopyWarning`, :class:`.NumExprClobberingError`, :class:`.UndefinedVariableError`, :class:`.IndexingError`, :class:`.PyperclipException`, :class:`.PyperclipWindowsException`, :class:`.CSSWarning`, :class:`.PossibleDataLossError`, :class:`.ClosedFileError`, :class:`.IncompatibilityWarning`, :class:`.AttributeConflictWarning`, :class:`.DatabaseError, :class:`.PossiblePrecisionLoss, :class:`.ValueLabelTypeMismatch, :class:`.InvalidColumnName, and :class:`.CategoricalConversionWarning` are now exposed in ``pandas.errors`` (:issue:`27656`) - Added ``check_like`` argument to :func:`testing.assert_series_equal` (:issue:`47247`) -- Add support for :meth:`GroupBy.ohlc` for extension array dtypes (:issue:`37493`) +- Add support for :meth:`.GroupBy.ohlc` for extension array dtypes (:issue:`37493`) - Allow reading compressed SAS files with :func:`read_sas` (e.g., ``.sas7bdat.gz`` files) - :func:`pandas.read_html` now supports extracting links from table cells (:issue:`13141`) - :meth:`DatetimeIndex.astype` now supports casting timezone-naive indexes to ``datetime64[s]``, ``datetime64[ms]``, and ``datetime64[us]``, and timezone-aware indexes to the corresponding ``datetime64[unit, tzname]`` dtypes (:issue:`47579`) @@ -1078,12 +1078,13 @@ Groupby/resample/rolling - Bug when using ``engine="numba"`` would return the same jitted function when modifying ``engine_kwargs`` (:issue:`46086`) - Bug in :meth:`.DataFrameGroupBy.transform` fails when ``axis=1`` and ``func`` is ``"first"`` or ``"last"`` (:issue:`45986`) - Bug in :meth:`DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`) -- Bug in :meth:`GroupBy.sum` with integer dtypes losing precision (:issue:`37493`) +- Bug in :meth:`.GroupBy.sum` and :meth:`.GroupBy.cumsum` with integer dtypes losing precision (:issue:`37493`) - Bug in :meth:`.GroupBy.cumsum` with ``timedelta64[ns]`` dtype failing to recognize ``NaT`` as a null value (:issue:`46216`) +- Bug in :meth:`.GroupBy.cumsum` with integer dtypes causing overflows when sum was bigger than maximum of dtype (:issue:`37493`) - Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable dtypes incorrectly altering the original data in place (:issue:`46220`) - Bug in :meth:`DataFrame.groupby` raising error when ``None`` is in first level of :class:`MultiIndex` (:issue:`47348`) - Bug in :meth:`.GroupBy.cummax` with ``int64`` dtype with leading value being the smallest possible int64 (:issue:`46382`) -- Bug in :meth:`GroupBy.cumprod` ``NaN`` influences calculation in different columns with ``skipna=False`` (:issue:`48064`) +- Bug in :meth:`.GroupBy.cumprod` ``NaN`` influences calculation in different columns with ``skipna=False`` (:issue:`48064`) - Bug in :meth:`.GroupBy.max` with empty groups and ``uint64`` dtype incorrectly raising ``RuntimeError`` (:issue:`46408`) - Bug in :meth:`.GroupBy.apply` would fail when ``func`` was a string and args or kwargs were supplied (:issue:`46479`) - Bug in :meth:`SeriesGroupBy.apply` would incorrectly name its result when there was a unique group (:issue:`46369`) diff --git 
a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 55662ff6c7494..c8e9df6cd6b38 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -20,12 +20,14 @@ def group_cumprod_float64( skipna: bool = ..., ) -> None: ... def group_cumsum( - out: np.ndarray, # numeric[:, ::1] - values: np.ndarray, # ndarray[numeric, ndim=2] + out: np.ndarray, # int64float_t[:, ::1] + values: np.ndarray, # ndarray[int64float_t, ndim=2] labels: np.ndarray, # const int64_t[:] ngroups: int, is_datetimelike: bool, skipna: bool = ..., + mask: np.ndarray | None = ..., + result_mask: np.ndarray | None = ..., ) -> None: ... def group_shift_indexer( out: np.ndarray, # int64_t[::1] diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index b16c74c48eee7..563abf949dbbc 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -206,15 +206,24 @@ def group_cumprod_float64( accum[lab, j] = NaN +ctypedef fused int64float_t: + int64_t + uint64_t + float32_t + float64_t + + @cython.boundscheck(False) @cython.wraparound(False) def group_cumsum( - numeric_t[:, ::1] out, - ndarray[numeric_t, ndim=2] values, + int64float_t[:, ::1] out, + ndarray[int64float_t, ndim=2] values, const intp_t[::1] labels, int ngroups, bint is_datetimelike, bint skipna=True, + const uint8_t[:, :] mask=None, + uint8_t[:, ::1] result_mask=None, ) -> None: """ Cumulative sum of columns of `values`, in row groups `labels`. @@ -233,6 +242,10 @@ def group_cumsum( True if `values` contains datetime-like entries. skipna : bool If true, ignore nans in `values`. + mask: np.ndarray[uint8], optional + Mask of values + result_mask: np.ndarray[int8], optional + Mask of out array Notes ----- @@ -240,16 +253,22 @@ def group_cumsum( """ cdef: Py_ssize_t i, j, N, K, size - numeric_t val, y, t, na_val - numeric_t[:, ::1] accum, compensation + int64float_t val, y, t, na_val + int64float_t[:, ::1] accum, compensation + uint8_t[:, ::1] accum_mask intp_t lab bint isna_entry, isna_prev = False + bint uses_mask = mask is not None N, K = (<object>values).shape + + if uses_mask: + accum_mask = np.zeros((ngroups, K), dtype="uint8") + accum = np.zeros((ngroups, K), dtype=np.asarray(values).dtype) compensation = np.zeros((ngroups, K), dtype=np.asarray(values).dtype) - na_val = _get_na_val(<numeric_t>0, is_datetimelike) + na_val = _get_na_val(<int64float_t>0, is_datetimelike) with nogil: for i in range(N): @@ -260,23 +279,45 @@ def group_cumsum( for j in range(K): val = values[i, j] - isna_entry = _treat_as_na(val, is_datetimelike) + if uses_mask: + isna_entry = mask[i, j] + else: + isna_entry = _treat_as_na(val, is_datetimelike) if not skipna: - isna_prev = _treat_as_na(accum[lab, j], is_datetimelike) + if uses_mask: + isna_prev = accum_mask[lab, j] + else: + isna_prev = _treat_as_na(accum[lab, j], is_datetimelike) + if isna_prev: - out[i, j] = na_val + if uses_mask: + result_mask[i, j] = True + # Be deterministic, out was initialized as empty + out[i, j] = 0 + else: + out[i, j] = na_val continue if isna_entry: - out[i, j] = na_val + + if uses_mask: + result_mask[i, j] = True + # Be deterministic, out was initialized as empty + out[i, j] = 0 + else: + out[i, j] = na_val + if not skipna: - accum[lab, j] = na_val + if uses_mask: + accum_mask[lab, j] = True + else: + accum[lab, j] = na_val else: # For floats, use Kahan summation to reduce floating-point # error (https://en.wikipedia.org/wiki/Kahan_summation_algorithm) - if numeric_t == float32_t or numeric_t == float64_t: + if int64float_t == float32_t or int64float_t == float64_t: y = val 
- compensation[lab, j] t = accum[lab, j] + y compensation[lab, j] = t - accum[lab, j] - y @@ -834,13 +875,6 @@ def group_mean( out[i, j] = sumx[i, j] / count -ctypedef fused int64float_t: - float32_t - float64_t - int64_t - uint64_t - - @cython.wraparound(False) @cython.boundscheck(False) def group_ohlc( @@ -1070,7 +1104,7 @@ cdef numeric_t _get_na_val(numeric_t val, bint is_datetimelike): elif numeric_t is int64_t and is_datetimelike: na_val = NPY_NAT else: - # Will not be used, but define to avoid uninitialized warning. + # Used in case of masks na_val = 0 return na_val diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 038e4afdbd767..13df7ba465741 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -158,6 +158,7 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: "rank", "sum", "ohlc", + "cumsum", } _cython_arity = {"ohlc": 4} # OHLC @@ -226,7 +227,7 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: # result may still include NaN, so we have to cast values = ensure_float64(values) - elif how in ["sum", "ohlc"]: + elif how in ["sum", "ohlc", "cumsum"]: # Avoid overflow during group op if values.dtype.kind == "i": values = ensure_int64(values) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index b9f568fc9577b..ca356d94d84f8 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2846,12 +2846,15 @@ def test_single_element_list_grouping(): values, _ = next(iter(df.groupby(["a"]))) -def test_groupby_sum_avoid_casting_to_float(): [email protected]("func", ["sum", "cumsum"]) +def test_groupby_sum_avoid_casting_to_float(func): # GH#37493 val = 922337203685477580 df = DataFrame({"a": 1, "b": [val]}) - result = df.groupby("a").sum() - val + result = getattr(df.groupby("a"), func)() - val expected = DataFrame({"b": [0]}, index=Index([1], name="a")) + if func == "cumsum": + expected = expected.reset_index(drop=True) tm.assert_frame_equal(result, expected) @@ -2868,7 +2871,7 @@ def test_groupby_sum_support_mask(any_numeric_ea_dtype): @pytest.mark.parametrize("val, dtype", [(111, "int"), (222, "uint")]) -def test_groupby_sum_overflow(val, dtype): +def test_groupby_overflow(val, dtype): # GH#37493 df = DataFrame({"a": 1, "b": [val, val]}, dtype=f"{dtype}8") result = df.groupby("a").sum() @@ -2878,3 +2881,19 @@ def test_groupby_sum_overflow(val, dtype): dtype=f"{dtype}64", ) tm.assert_frame_equal(result, expected) + + result = df.groupby("a").cumsum() + expected = DataFrame({"b": [val, val * 2]}, dtype=f"{dtype}64") + tm.assert_frame_equal(result, expected) + + [email protected]("skipna, val", [(True, 3), (False, pd.NA)]) +def test_groupby_cumsum_mask(any_numeric_ea_dtype, skipna, val): + # GH#37493 + df = DataFrame({"a": 1, "b": [1, pd.NA, 2]}, dtype=any_numeric_ea_dtype) + result = df.groupby("a").cumsum(skipna=skipna) + expected = DataFrame( + {"b": [1, pd.NA, val]}, + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_libgroupby.py b/pandas/tests/groupby/test_libgroupby.py index cde9b36fd0bf4..24abbd0f65795 100644 --- a/pandas/tests/groupby/test_libgroupby.py +++ b/pandas/tests/groupby/test_libgroupby.py @@ -183,9 +183,10 @@ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype): tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False) -def test_cython_group_transform_cumsum(any_real_numpy_dtype): [email protected]("np_dtype", ["int64", 
"uint64", "float32", "float64"]) +def test_cython_group_transform_cumsum(np_dtype): # see gh-4095 - dtype = np.dtype(any_real_numpy_dtype).type + dtype = np.dtype(np_dtype).type pd_op, np_op = group_cumsum, np.cumsum _check_cython_group_transform_cumulative(pd_op, np_op, dtype)
- [x] xref #37493 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. This has a bit too much if/else branching imo, but I would like to use it as a starting point and refactor afterwards. Keeping the cases explicit helps with reviewing and also makes it easier to account for every case. cc @jorisvandenbossche
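To illustrate the end-user effect of the masked cumsum path, a sketch based on the new test (`Int64` chosen as one representative nullable dtype):

```python
import pandas as pd

# With mask support, cumsum on nullable integers keeps the masked dtype
# and no longer needs a round-trip through float.
df = pd.DataFrame({"a": 1, "b": pd.array([1, pd.NA, 2], dtype="Int64")})

print(df.groupby("a").cumsum(skipna=True))   # b: [1, <NA>, 3]
print(df.groupby("a").cumsum(skipna=False))  # b: [1, <NA>, <NA>]
```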
https://api.github.com/repos/pandas-dev/pandas/pulls/48070
2022-08-13T14:52:58Z
2022-08-18T18:29:50Z
2022-08-18T18:29:50Z
2022-08-18T20:29:13Z
STYLE: upgrade flake8
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dbddba57ef21c..2ca5b5c9b896b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ repos: - id: absolufy-imports files: ^pandas/ - repo: https://github.com/jendrikseipp/vulture - rev: 'v2.4' + rev: 'v2.5' hooks: - id: vulture entry: python scripts/run_vulture.py @@ -46,20 +46,19 @@ repos: exclude: ^pandas/_libs/src/(klib|headers)/ args: [--quiet, '--extensions=c,h', '--headers=h', --recursive, '--filter=-readability/casting,-runtime/int,-build/include_subdir'] - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 5.0.4 hooks: - id: flake8 additional_dependencies: &flake8_dependencies - - flake8==4.0.1 - - flake8-comprehensions==3.7.0 - - flake8-bugbear==21.3.2 + - flake8==5.0.4 + - flake8-bugbear==22.7.1 - pandas-dev-flaker==0.5.0 - repo: https://github.com/PyCQA/isort rev: 5.10.1 hooks: - id: isort - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v2.37.3 hooks: - id: pyupgrade args: [--py38-plus] @@ -239,8 +238,8 @@ repos: types: [pyi] language: python additional_dependencies: - - flake8==4.0.1 - - flake8-pyi==22.7.0 + - flake8==5.0.4 + - flake8-pyi==22.8.1 - id: future-annotations name: import annotations from __future__ entry: 'from __future__ import annotations' diff --git a/environment.yml b/environment.yml index 8866b1b3b7df6..f1472f453b935 100644 --- a/environment.yml +++ b/environment.yml @@ -85,9 +85,8 @@ dependencies: # code checks - black=22.3.0 - cpplint - - flake8=4.0.1 - - flake8-bugbear=21.3.2 # used by flake8, find likely bugs - - flake8-comprehensions=3.7.0 # used by flake8, linting of unnecessary comprehensions + - flake8=5.0.4 + - flake8-bugbear=22.7.1 # used by flake8, find likely bugs - isort>=5.2.1 # check that imports are in the right order - mypy=0.971 - pre-commit>=2.15.0 diff --git a/pandas/tests/arrays/categorical/test_repr.py b/pandas/tests/arrays/categorical/test_repr.py index 3454c8bb90941..b44af07cee01d 100644 --- a/pandas/tests/arrays/categorical/test_repr.py +++ b/pandas/tests/arrays/categorical/test_repr.py @@ -319,7 +319,7 @@ def test_categorical_repr_timedelta(self): c = Categorical(idx.append(idx), categories=idx) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] -Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa:E501 assert repr(c) == exp @@ -352,7 +352,7 @@ def test_categorical_repr_timedelta_ordered(self): c = Categorical(idx.append(idx), categories=idx, ordered=True) exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] -Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa:E501 assert repr(c) == exp diff --git a/pandas/tests/indexes/categorical/test_formats.py b/pandas/tests/indexes/categorical/test_formats.py index d48f6dc70a2b8..d7812888556ea 100644 --- a/pandas/tests/indexes/categorical/test_formats.py +++ b/pandas/tests/indexes/categorical/test_formats.py @@ -24,7 +24,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], - categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" + categories=['a', 'bb', 
'ccc'], ordered=False, dtype='category')""" # noqa:E501 assert repr(idx) == expected @@ -55,7 +55,7 @@ def test_string_categorical_index_repr(self): expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 assert repr(idx) == expected @@ -90,7 +90,7 @@ def test_string_categorical_index_repr(self): 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], - categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa:E501 assert repr(idx) == expected diff --git a/pandas/tests/indexes/multi/test_formats.py b/pandas/tests/indexes/multi/test_formats.py index f71472b726fa2..238a3e78169a8 100644 --- a/pandas/tests/indexes/multi/test_formats.py +++ b/pandas/tests/indexes/multi/test_formats.py @@ -184,7 +184,7 @@ def test_tuple_width(self, wide_multi_index): mi = wide_multi_index result = mi[:1].__repr__() expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)], - names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa:E501 assert result == expected result = mi[:10].__repr__() diff --git a/requirements-dev.txt b/requirements-dev.txt index a6e0f9e98b52a..60dd738e43ba3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -64,9 +64,8 @@ flask asv black==22.3.0 cpplint -flake8==4.0.1 -flake8-bugbear==21.3.2 -flake8-comprehensions==3.7.0 +flake8==5.0.4 +flake8-bugbear==22.7.1 isort>=5.2.1 mypy==0.971 pre-commit>=2.15.0 diff --git a/setup.cfg b/setup.cfg index 8f7cfc288ecdb..f2314316f7732 100644 --- a/setup.cfg +++ b/setup.cfg @@ -103,6 +103,12 @@ ignore = # tests use comparisons but not their returned value B015, # false positives + B019, + # Use of functools.lru_cache or functools.cache on methods can lead to memory leaks. + B020 + # Loop control variable overrides iterable it iterates + B023 + # Functions defined inside a loop must not use variables redefined in the loop B301, # single-letter variables PDF023,
- [x] closes #48061 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/48069
2022-08-13T12:48:01Z
2022-08-14T16:04:09Z
2022-08-14T16:04:09Z
2022-08-14T18:14:55Z
TST: Check index when grouping all columns of empty DF (#32464)
diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 93e9b5bb776ab..45ac77bac9e02 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1574,3 +1574,15 @@ def test_corrwith_with_1_axis(): ) expected = Series([np.nan] * 6, index=index) tm.assert_series_equal(result, expected) + + +def test_multiindex_group_all_columns_when_empty(groupby_func): + # GH 32464 + df = DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"]) + gb = df.groupby(["a", "b", "c"]) + method = getattr(gb, groupby_func) + args = get_groupby_method_args(groupby_func, df) + + result = method(*args).index + expected = df.index + tm.assert_index_equal(result, expected)
- [x] closes #32464 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. _(does not apply)_
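A compact sketch of the invariant the new test checks, using `sum` as one representative reducer (the test itself parametrizes over all groupby methods):

```python
import pandas as pd

# Grouping an empty frame by all of its columns should yield an empty
# result whose index matches the original empty MultiIndex.
df = pd.DataFrame({"a": [], "b": [], "c": []}).set_index(["a", "b", "c"])
result = df.groupby(["a", "b", "c"]).sum()

pd.testing.assert_index_equal(result.index, df.index)
```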
https://api.github.com/repos/pandas-dev/pandas/pulls/48067
2022-08-13T00:54:30Z
2022-08-15T21:11:53Z
2022-08-15T21:11:53Z
2022-08-20T12:55:28Z
CI: Avoid flaky build errors & show installed packages in 32 bit build
diff --git a/.github/workflows/32-bit-linux.yml b/.github/workflows/32-bit-linux.yml index e091160c952f8..67e99b4486a12 100644 --- a/.github/workflows/32-bit-linux.yml +++ b/.github/workflows/32-bit-linux.yml @@ -39,8 +39,9 @@ jobs: . ~/virtualenvs/pandas-dev/bin/activate && \ python -m pip install --no-deps -U pip wheel 'setuptools<60.0.0' && \ pip install cython numpy python-dateutil pytz pytest pytest-xdist pytest-asyncio>=0.17 hypothesis && \ - python setup.py build_ext -q -j2 && \ + python setup.py build_ext -q -j1 && \ python -m pip install --no-build-isolation --no-use-pep517 -e . && \ + python -m pip list && \ export PANDAS_CI=1 && \ pytest -m 'not slow and not network and not clipboard and not single_cpu' pandas --junitxml=test-data.xml"
xref #47305 & https://github.com/pandas-dev/pandas/pull/48065#discussion_r944893462
https://api.github.com/repos/pandas-dev/pandas/pulls/48066
2022-08-12T22:30:17Z
2022-08-17T01:59:13Z
2022-08-17T01:59:13Z
2022-08-17T02:12:14Z
CI/DEPS: Fix timezone test due to pytz upgrade
diff --git a/pandas/tests/tseries/offsets/test_dst.py b/pandas/tests/tseries/offsets/test_dst.py index 50c5a91fc2390..9c6d6a686e9a5 100644 --- a/pandas/tests/tseries/offsets/test_dst.py +++ b/pandas/tests/tseries/offsets/test_dst.py @@ -31,6 +31,10 @@ ) from pandas.tests.tseries.offsets.test_offsets import get_utc_offset_hours +from pandas.util.version import Version + +# error: Module has no attribute "__version__" +pytz_version = Version(pytz.__version__) # type: ignore[attr-defined] class TestDST: @@ -186,9 +190,8 @@ def test_all_offset_classes(self, tup): MonthBegin(66), "Africa/Kinshasa", marks=pytest.mark.xfail( - # error: Module has no attribute "__version__" - float(pytz.__version__) <= 2020.1, # type: ignore[attr-defined] - reason="GH#41906", + pytz_version < Version("2020.5") or pytz_version == Version("2022.2"), + reason="GH#41906: pytz utc transition dates changed", ), ), (
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
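The key change replaces the fragile `float(pytz.__version__)` cast with proper version objects; a quick sketch of why that matters:

```python
from pandas.util.version import Version

# float("2022.2.1") would raise ValueError, while Version handles
# multi-component version strings and compares them correctly.
assert Version("2020.4") < Version("2020.5")
assert Version("2022.2.1") > Version("2022.2")
```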
https://api.github.com/repos/pandas-dev/pandas/pulls/48065
2022-08-12T19:16:03Z
2022-08-14T05:38:44Z
2022-08-14T05:38:44Z
2022-08-14T05:38:47Z
BUG: Groupby cumprod nan influences other columns with skipna False
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index b71d294b97f9a..92a99098db05c 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1075,6 +1075,7 @@ Groupby/resample/rolling - Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable dtypes incorrectly altering the original data in place (:issue:`46220`) - Bug in :meth:`DataFrame.groupby` raising error when ``None`` is in first level of :class:`MultiIndex` (:issue:`47348`) - Bug in :meth:`.GroupBy.cummax` with ``int64`` dtype with leading value being the smallest possible int64 (:issue:`46382`) +- Bug in :meth:`GroupBy.cumprod` ``NaN`` influences calculation in different columns with ``skipna=False`` (:issue:`48064`) - Bug in :meth:`.GroupBy.max` with empty groups and ``uint64`` dtype incorrectly raising ``RuntimeError`` (:issue:`46408`) - Bug in :meth:`.GroupBy.apply` would fail when ``func`` was a string and args or kwargs were supplied (:issue:`46479`) - Bug in :meth:`SeriesGroupBy.apply` would incorrectly name its result when there was a unique group (:issue:`46369`) diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 6e2b79a320dd7..e33733ed6d14f 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -204,7 +204,6 @@ def group_cumprod_float64( out[i, j] = NaN if not skipna: accum[lab, j] = NaN - break @cython.boundscheck(False) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 93e9b5bb776ab..04c364fab63cf 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -650,6 +650,20 @@ def test_groupby_cumprod(): tm.assert_series_equal(actual, expected) +def test_groupby_cumprod_nan_influences_other_columns(): + # GH#48064 + df = DataFrame( + { + "a": 1, + "b": [1, np.nan, 2], + "c": [1, 2, 3.0], + } + ) + result = df.groupby("a").cumprod(numeric_only=True, skipna=False) + expected = DataFrame({"b": [1, np.nan, np.nan], "c": [1, 2, 6.0]}) + tm.assert_frame_equal(result, expected) + + def scipy_sem(*args, **kwargs): from scipy.stats import sem
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Saw this when working on mask support. Previously this returned ``` b c 0 22.0 1.0 1 NaN NaN 2 NaN 3.0 ``` because the calculation in the second row was skipped for the second column after the NaN was encountered.
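A reproducer sketch matching the new test; after the fix, the `c` column is no longer affected by the NaN in `b`:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": 1, "b": [1, np.nan, 2], "c": [1, 2, 3.0]})

# skipna=False now poisons only the column containing the NaN.
result = df.groupby("a").cumprod(numeric_only=True, skipna=False)
print(result)
#      b    c
# 0  1.0  1.0
# 1  NaN  2.0
# 2  NaN  6.0
```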
https://api.github.com/repos/pandas-dev/pandas/pulls/48064
2022-08-12T19:13:39Z
2022-08-15T17:25:23Z
2022-08-15T17:25:23Z
2022-08-15T17:27:57Z
fixed docstring error with pandas.Series.plot.area:Unknown parameter: {'stacked'}
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 96f9abb301471..8d6c2062f9484 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1484,7 +1484,7 @@ def kde(self, bw_method=None, ind=None, **kwargs) -> PlotAccessor: density = kde - def area(self, x=None, y=None, **kwargs) -> PlotAccessor: + def area(self, x=None, y=None, stacked=True, **kwargs) -> PlotAccessor: """ Draw a stacked area plot. @@ -1555,7 +1555,7 @@ def area(self, x=None, y=None, **kwargs) -> PlotAccessor: ... }) >>> ax = df.plot.area(x='day') """ - return self(kind="area", x=x, y=y, **kwargs) + return self(kind="area", x=x, y=y, stacked=stacked, **kwargs) def pie(self, **kwargs) -> PlotAccessor: """
When you run `python ./scripts/validate_docstrings.py --errors=PR02`, one of the errors is `/home/pandas/pandas/plotting/_core.py:1470:PR02:pandas.Series.plot.area:Unknown parameters {'stacked'}` I believe the error occurs because `stacked` is declared in the docstring but is not a parameter of the function. Even though `stacked` only needs to be passed explicitly when setting it to `False`, it should still be defined in the function signature. - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
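A minimal usage sketch of the now-explicit parameter (requires a matplotlib backend):

```python
import pandas as pd

df = pd.DataFrame({"sales": [3, 2, 3], "signups": [5, 5, 6]})

# `stacked` is now a named parameter of plot.area, matching its
# docstring; False draws unstacked (overlapping) areas.
ax = df.plot.area(stacked=False)
```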
https://api.github.com/repos/pandas-dev/pandas/pulls/48063
2022-08-12T18:29:42Z
2022-08-26T21:40:24Z
2022-08-26T21:40:24Z
2022-08-26T21:40:47Z
REF: make copy keyword non-stateful
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index cb392eee1d589..799becbc652f6 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -117,11 +117,10 @@ def merge( right_index=right_index, sort=sort, suffixes=suffixes, - copy=copy, indicator=indicator, validate=validate, ) - return op.get_result() + return op.get_result(copy=copy) if __debug__: @@ -623,7 +622,6 @@ def __init__( right_index: bool = False, sort: bool = True, suffixes: Suffixes = ("_x", "_y"), - copy: bool = True, indicator: bool = False, validate: str | None = None, ) -> None: @@ -642,7 +640,6 @@ def __init__( self.left_on = com.maybe_make_list(left_on) self.right_on = com.maybe_make_list(right_on) - self.copy = copy self.suffixes = suffixes self.sort = sort @@ -713,7 +710,7 @@ def __init__( if validate is not None: self._validate(validate) - def get_result(self) -> DataFrame: + def get_result(self, copy: bool = True) -> DataFrame: if self.indicator: self.left, self.right = self._indicator_pre_merge(self.left, self.right) @@ -730,7 +727,7 @@ def get_result(self) -> DataFrame: [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], axes=[llabels.append(rlabels), join_index], concat_axis=0, - copy=self.copy, + copy=copy, ) typ = self.left._constructor @@ -1100,7 +1097,7 @@ def _get_merge_keys(self): else: # work-around for merge_asof(right_index=True) right_keys.append(right.index) - if lk is not None and lk == rk: + if lk is not None and lk == rk: # FIXME: what about other NAs? # avoid key upcast in corner case (length-0) if len(left) > 0: right_drop.append(rk) @@ -1637,7 +1634,6 @@ def __init__( right_index: bool = False, axis: int = 1, suffixes: Suffixes = ("_x", "_y"), - copy: bool = True, fill_method: str | None = None, how: str = "outer", ) -> None: @@ -1658,7 +1654,7 @@ def __init__( sort=True, # factorize sorts ) - def get_result(self) -> DataFrame: + def get_result(self, copy: bool = True) -> DataFrame: join_index, left_indexer, right_indexer = self._get_join_info() llabels, rlabels = _items_overlap_with_suffix( @@ -1687,7 +1683,7 @@ def get_result(self) -> DataFrame: [(self.left._mgr, lindexers), (self.right._mgr, rindexers)], axes=[llabels.append(rlabels), join_index], concat_axis=0, - copy=self.copy, + copy=copy, ) typ = self.left._constructor
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/48062
2022-08-12T17:42:40Z
2022-08-15T17:21:13Z
2022-08-15T17:21:13Z
2022-08-15T17:42:53Z
REGR: Avoid overflow with groupby sum
diff --git a/pandas/_libs/algos.pyi b/pandas/_libs/algos.pyi index 9ffcf25f6eacd..5a2005722c85c 100644 --- a/pandas/_libs/algos.pyi +++ b/pandas/_libs/algos.pyi @@ -132,6 +132,7 @@ def ensure_int8(arr: object, copy=...) -> npt.NDArray[np.int8]: ... def ensure_int16(arr: object, copy=...) -> npt.NDArray[np.int16]: ... def ensure_int32(arr: object, copy=...) -> npt.NDArray[np.int32]: ... def ensure_int64(arr: object, copy=...) -> npt.NDArray[np.int64]: ... +def ensure_uint64(arr: object, copy=...) -> npt.NDArray[np.uint64]: ... def take_1d_int8_int8( values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=... ) -> None: ... diff --git a/pandas/_libs/algos_common_helper.pxi.in b/pandas/_libs/algos_common_helper.pxi.in index 991566f9b7143..ce2e1ffbb5870 100644 --- a/pandas/_libs/algos_common_helper.pxi.in +++ b/pandas/_libs/algos_common_helper.pxi.in @@ -41,12 +41,12 @@ dtypes = [('float64', 'FLOAT64', 'float64'), ('int16', 'INT16', 'int16'), ('int32', 'INT32', 'int32'), ('int64', 'INT64', 'int64'), + ('uint64', 'UINT64', 'uint64'), # Disabling uint and complex dtypes because we do not use them - # (and compiling them increases wheel size) + # (and compiling them increases wheel size) (except uint64) # ('uint8', 'UINT8', 'uint8'), # ('uint16', 'UINT16', 'uint16'), # ('uint32', 'UINT32', 'uint32'), - # ('uint64', 'UINT64', 'uint64'), # ('complex64', 'COMPLEX64', 'complex64'), # ('complex128', 'COMPLEX128', 'complex128') ] diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e4314edecfa7e..6e2b79a320dd7 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -513,14 +513,7 @@ ctypedef fused mean_t: ctypedef fused sum_t: mean_t - int8_t - int16_t - int32_t int64_t - - uint8_t - uint16_t - uint32_t uint64_t object diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index be4d50af8a053..f0e4a54c3f05c 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -100,6 +100,7 @@ def ensure_float(arr): ensure_int8 = algos.ensure_int8 ensure_platform_int = algos.ensure_platform_int ensure_object = algos.ensure_object +ensure_uint64 = algos.ensure_uint64 def ensure_str(value: bytes | Any) -> str: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 7617ca5074c9c..caea70e03b6f3 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -46,6 +46,7 @@ ensure_float64, ensure_int64, ensure_platform_int, + ensure_uint64, is_1d_only_ea_dtype, is_bool_dtype, is_complex_dtype, @@ -224,6 +225,13 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: # result may still include NaN, so we have to cast values = ensure_float64(values) + elif how == "sum": + # Avoid overflow during group op + if values.dtype.kind == "i": + values = ensure_int64(values) + else: + values = ensure_uint64(values) + return values # TODO: general case implementation overridable by EAs. 
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index a7c5b85e365ae..d290aada18293 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2829,3 +2829,16 @@ def test_groupby_sum_support_mask(any_numeric_ea_dtype): dtype=any_numeric_ea_dtype, ) tm.assert_frame_equal(result, expected) + + [email protected]("val, dtype", [(111, "int"), (222, "uint")]) +def test_groupby_sum_overflow(val, dtype): + # GH#37493 + df = DataFrame({"a": 1, "b": [val, val]}, dtype=f"{dtype}8") + result = df.groupby("a").sum() + expected = DataFrame( + {"b": [val * 2]}, + index=Index([1], name="a", dtype=f"{dtype}64"), + dtype=f"{dtype}64", + ) + tm.assert_frame_equal(result, expected)
This is the alternative to #48044. I would very much prefer this one, even though it is a bit slower. cc @mroeschke cc @jorisvandenbossche
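A sketch of the fixed behavior based on the new test (small-width integers are upcast to 64 bits before summing):

```python
import pandas as pd

# int8 values would previously overflow during the group sum; after the
# fix they are summed as int64.
df = pd.DataFrame({"a": 1, "b": [111, 111]}, dtype="int8")
result = df.groupby("a").sum()
print(result["b"].iloc[0], result["b"].dtype)  # 222 int64
```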
https://api.github.com/repos/pandas-dev/pandas/pulls/48059
2022-08-12T15:53:33Z
2022-08-12T19:09:38Z
2022-08-12T19:09:38Z
2022-08-12T19:09:41Z
REGR: fix reset_index (Index.insert) regression with custom Index subclasses
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index 25db2ef7253d9..b09d14010545c 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) - Fixed regression in :meth:`DataFrame.loc` setting a length-1 array like value to a single value in the DataFrame (:issue:`46268`) - Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) +- Fixed regression using custom Index subclasses (for example, used in xarray) with :meth:`~DataFrame.reset_index` or :meth:`Index.insert` (:issue:`47071`) - Fixed regression in :meth:`DatetimeIndex.intersection` when the :class:`DatetimeIndex` has dates crossing daylight savings time (:issue:`46702`) - Fixed regression in :func:`merge` throwing an error when passing a :class:`Series` with a multi-level name (:issue:`47946`) - Fixed regression in :meth:`DataFrame.eval` creating a copy when updating inplace (:issue:`47449`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6e620bc072a23..d2fbe22a0b5cb 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6912,10 +6912,12 @@ def insert(self, loc: int, item) -> Index: loc = loc if loc >= 0 else loc - 1 new_values[loc] = item - # Use self._constructor instead of Index to retain NumericIndex GH#43921 - # TODO(2.0) can use Index instead of self._constructor - # Check if doing so fixes GH#47071 - return self._constructor._with_infer(new_values, name=self.name) + if self._typ == "numericindex": + # Use self._constructor instead of Index to retain NumericIndex GH#43921 + # TODO(2.0) can use Index instead of self._constructor + return self._constructor._with_infer(new_values, name=self.name) + else: + return Index._with_infer(new_values, name=self.name) def drop( self, diff --git a/pandas/tests/indexes/test_subclass.py b/pandas/tests/indexes/test_subclass.py new file mode 100644 index 0000000000000..2ddf3baabbec0 --- /dev/null +++ b/pandas/tests/indexes/test_subclass.py @@ -0,0 +1,38 @@ +""" +Tests involving custom Index subclasses +""" +import numpy as np + +from pandas import ( + DataFrame, + Index, +) +import pandas._testing as tm + + +class CustomIndex(Index): + def __new__(cls, data, name=None): + # assert that this index class cannot hold strings + if any(isinstance(val, str) for val in data): + raise TypeError("CustomIndex cannot hold strings") + + if name is None and hasattr(data, "name"): + name = data.name + data = np.array(data, dtype="O") + + return cls._simple_new(data, name) + + +def test_insert_fallback_to_base_index(): + # https://github.com/pandas-dev/pandas/issues/47071 + + idx = CustomIndex([1, 2, 3]) + result = idx.insert(0, "string") + expected = Index(["string", 1, 2, 3], dtype=object) + tm.assert_index_equal(result, expected) + + df = DataFrame( + np.random.randn(2, 3), columns=idx, index=Index([1, 2], name="string") + ) + result = df.reset_index() + tm.assert_index_equal(result.columns, expected)
- [x] closes #47071 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. I added a new test file with a minimal custom Index subclass, as I don't know if it can otherwise be tested with our own indexes (and I put it in a new file since I didn't find existing tests for Index subclasses).
https://api.github.com/repos/pandas-dev/pandas/pulls/48058
2022-08-12T14:41:46Z
2022-08-22T19:46:20Z
2022-08-22T19:46:20Z
2022-08-23T05:07:42Z
REGR: fix regression in scalar setitem with setting a length-1 array-like
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index 57b8fdee5888a..32b4d1f8a8012 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -18,6 +18,7 @@ Fixed regressions - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) - Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`) - Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) +- Fixed regression in :meth:`DataFrame.loc` setting a length-1 array like value to a single value in the DataFrame (:issue:`46268`) - Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) - diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index b143e1e50aa6c..d7f7941f017de 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1854,8 +1854,10 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str): # We get here in one case via .loc with a all-False mask pass - elif self._is_scalar_access(indexer): - # We are setting nested data + elif self._is_scalar_access(indexer) and is_object_dtype( + self.obj.dtypes[ilocs[0]] + ): + # We are setting nested data, only possible for object dtype data self._setitem_single_column(indexer[1], value, pi) elif len(ilocs) == len(value): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 9d10e487e0cc2..069e5a62895af 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1,5 +1,6 @@ """ test fancy indexing & misc """ +import array from datetime import datetime import re import weakref @@ -1019,3 +1020,95 @@ def test_ser_list_indexer_exceeds_dimensions(indexer_li): res = indexer_li(ser)[[0, 0]] exp = Series([10, 10], index=Index([0, 0])) tm.assert_series_equal(res, exp) + + [email protected]( + "value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])] +) +def test_scalar_setitem_with_nested_value(value): + # For numeric data, we try to unpack and thus raise for mismatching length + df = DataFrame({"A": [1, 2, 3]}) + msg = "|".join( + [ + "Must have equal len keys and value", + "setting an array element with a sequence", + ] + ) + with pytest.raises(ValueError, match=msg): + df.loc[0, "B"] = value + + # TODO For object dtype this happens as well, but should we rather preserve + # the nested data and set as such? 
+ df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)}) + with pytest.raises(ValueError, match="Must have equal len keys and value"): + df.loc[0, "B"] = value + # if isinstance(value, np.ndarray): + # assert (df.loc[0, "B"] == value).all() + # else: + # assert df.loc[0, "B"] == value + + [email protected]( + "value", [(0, 1), [0, 1], np.array([0, 1]), array.array("b", [0, 1])] +) +def test_scalar_setitem_series_with_nested_value(value, indexer_sli): + # For numeric data, we try to unpack and thus raise for mismatching length + ser = Series([1, 2, 3]) + with pytest.raises(ValueError, match="setting an array element with a sequence"): + indexer_sli(ser)[0] = value + + # but for object dtype we preserve the nested data and set as such + ser = Series([1, "a", "b"], dtype=object) + indexer_sli(ser)[0] = value + if isinstance(value, np.ndarray): + assert (ser.loc[0] == value).all() + else: + assert ser.loc[0] == value + + [email protected]( + "value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])] +) +def test_scalar_setitem_with_nested_value_length1(value): + # https://github.com/pandas-dev/pandas/issues/46268 + + # For numeric data, assigning length-1 array to scalar position gets unpacked + df = DataFrame({"A": [1, 2, 3]}) + df.loc[0, "B"] = value + expected = DataFrame({"A": [1, 2, 3], "B": [0.0, np.nan, np.nan]}) + tm.assert_frame_equal(df, expected) + + # but for object dtype we preserve the nested data + df = DataFrame({"A": [1, 2, 3], "B": np.array([1, "a", "b"], dtype=object)}) + df.loc[0, "B"] = value + if isinstance(value, np.ndarray): + assert (df.loc[0, "B"] == value).all() + else: + assert df.loc[0, "B"] == value + + [email protected]( + "value", [(0.0,), [0.0], np.array([0.0]), array.array("d", [0.0])] +) +def test_scalar_setitem_series_with_nested_value_length1(value, indexer_sli): + # For numeric data, assigning length-1 array to scalar position gets unpacked + # TODO this only happens in case of ndarray, should we make this consistent + # for all list-likes? (as happens for DataFrame.(i)loc, see test above) + ser = Series([1.0, 2.0, 3.0]) + if isinstance(value, np.ndarray): + indexer_sli(ser)[0] = value + expected = Series([0.0, 2.0, 3.0]) + tm.assert_series_equal(ser, expected) + else: + with pytest.raises( + ValueError, match="setting an array element with a sequence" + ): + indexer_sli(ser)[0] = value + + # but for object dtype we preserve the nested data + ser = Series([1, "a", "b"], dtype=object) + indexer_sli(ser)[0] = value + if isinstance(value, np.ndarray): + assert (ser.loc[0] == value).all() + else: + assert ser.loc[0] == value
- [x] closes #46268 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. While testing the various related cases, I noticed that our behaviour isn't always consistent; see the TODO notes in the tests. In pandas 1.3, we always unpacked an array-like when setting, also for object dtype. This means that for object dtype, it unpacked it for len-1, and raised an error for longer arrays: ``` In [31]: df = pd.DataFrame({"A": [1, 2, 3], "B": np.array([1, 'a', 'b'], dtype=object)}) In [32]: df.loc[0, "B"] = np.array([0]) In [33]: df Out[33]: A B 0 1 0 1 2 a 2 3 b In [34]: df.loc[0, "B"] = np.array([0, 1]) ... ValueError: Must have equal len keys and value when setting with an iterable ``` With the change in https://github.com/pandas-dev/pandas/pull/42780, in pandas 1.4 we started to preserve the nested object in case of len-1, while still raising for longer arrays (with an object dtype column): ``` In [24]: df = pd.DataFrame({"A": [1, 2, 3], "B": np.array([1, 'a', 'b'], dtype=object)}) In [25]: df.loc[0, "B"] = np.array([0]) In [26]: df Out[26]: A B 0 1 [0] # <----- 1 2 a 2 3 b In [27]: df.loc[1, "B"] = np.array([0, 1]) ... ValueError: Must have equal len keys and value when setting with an iterable ``` This is a bit inconsistent, I would say, and I think we should probably, long term, allow setting nested data of any length _in case of an object dtype column_ (the first TODO note). Because of the above change (no longer unpacking a len-1 array), this started to error for _numeric_ columns (i.e. the actual regression reported in #46268). What I did in this PR is limit the change of https://github.com/pandas-dev/pandas/pull/42780 to object dtype only, so we keep the new behaviour of preserving the nested object when setting in case of object dtype, while fixing the regression for numeric data. (Strictly speaking, we could also say that it is a regression for the object dtype case, but there I think it was actually an improvement in behaviour, and since this was already changed in 1.3.5 and nobody reported it, it seems better to keep it now.) In addition, while testing the same for Series, I noticed that for Series setitem we only do this unpacking of len-1 array-like values for actual numpy arrays, and not for array-likes in general (the second TODO note). TODO: open new issues to keep track of those TODOs and update those comments
https://api.github.com/repos/pandas-dev/pandas/pulls/48057
2022-08-12T13:22:48Z
2022-08-19T17:26:20Z
2022-08-19T17:26:20Z
2022-08-22T11:03:51Z
Fixed docstring error with style.py, unknown parameter: {'text_color_threshold'}
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 0522e113d6525..fe10d4e312a1e 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -2987,7 +2987,10 @@ def _get_numeric_subset_default(self): name="background", alt="text", image_prefix="bg", - text_threshold="", + text_threshold="""text_color_threshold : float or int\n + Luminance threshold for determining text color in [0, 1]. Facilitates text\n + visibility across varying background colors. All text is dark if 0, and\n + light if 1, defaults to 0.408.""", ) @Substitution(subset=subset) def background_gradient( @@ -3026,11 +3029,7 @@ def background_gradient( (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. %(subset)s - text_color_threshold : float or int - {text_threshold} - Luminance threshold for determining text color in [0, 1]. Facilitates text - visibility across varying background colors. All text is dark if 0, and - light if 1, defaults to 0.408. + {text_threshold} vmin : float, optional Minimum data value that corresponds to colormap minimum value. If not specified the minimum value of the data (or gmap) will be used. @@ -3147,7 +3146,7 @@ def background_gradient( name="text", alt="background", image_prefix="tg", - text_threshold="This argument is ignored (only used in `background_gradient`).", + text_threshold="", ) def text_gradient( self,
When you run `python ./scripts/validate_docstrings.py --errors=PR02`, one of the errors is `/home/pandas/pandas/io/formats/style.py:3144:PR02:pandas.io.formats.style.Styler.text_gradient:Unknown parameters {'text_color_threshold'}` I believe the error occurred because `text_gradient` reused the docstring of `background_gradient`, which lists `text_color_threshold` as a parameter, so the docstring validator expected `text_gradient` to also accept `text_color_threshold`, even though that parameter was never intended for `text_gradient`. - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
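For reference, a minimal usage sketch of the corrected method (requires matplotlib for the colormap; output inspection omitted):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

# text_gradient colors the text itself; text_color_threshold only makes
# sense for background_gradient, where text must contrast the background.
html = df.style.text_gradient(cmap="viridis").to_html()
```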
https://api.github.com/repos/pandas-dev/pandas/pulls/48055
2022-08-12T10:48:08Z
2022-08-18T15:43:06Z
2022-08-18T15:43:06Z
2022-08-18T15:43:15Z
DOC: add source link to properties
diff --git a/doc/source/conf.py b/doc/source/conf.py index 33c916f532e90..84958595f6618 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -653,12 +653,20 @@ def linkcode_resolve(domain, info): try: fn = inspect.getsourcefile(inspect.unwrap(obj)) except TypeError: - fn = None + try: # property + fn = inspect.getsourcefile(inspect.unwrap(obj.fget)) + except (AttributeError, TypeError): + fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) + except TypeError: + try: # property + source, lineno = inspect.getsourcelines(obj.fget) + except (AttributeError, TypeError): + lineno = None except OSError: lineno = None
The current version of `linkcode_resolve` is unable to handle properties and ignores them. Hence there are no links to the source code for properties, unlike for everything else. This adds the support for properties I implemented in https://github.com/geopandas/geopandas/pull/2526#issuecomment-1212926545, which is based on pandas code, so I thought it was worth contributing the enhancement back.
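A small sketch of the failure mode being handled (run as a script so a source file exists on disk; `Demo` is a hypothetical class for illustration):

```python
import inspect

class Demo:
    @property
    def value(self):
        return 1

# A property object is not a module/class/function, so getsourcefile
# raises TypeError; unwrapping to the underlying fget recovers the file.
try:
    inspect.getsourcefile(Demo.value)
except TypeError:
    print(inspect.getsourcefile(Demo.value.fget))
```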
https://api.github.com/repos/pandas-dev/pandas/pulls/48054
2022-08-12T10:05:30Z
2022-08-12T20:04:40Z
2022-08-12T20:04:39Z
2022-08-12T21:09:13Z
WEB: Update list of maintainers and improve inactive maintainers format
diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md index 2982105616f47..261d577b2abc1 100644 --- a/web/pandas/about/team.md +++ b/web/pandas/about/team.md @@ -6,10 +6,10 @@ _pandas_ is made with love by more than [2,000 volunteer contributors](https://g If you want to support pandas development, you can find information in the [donations page](../donate.html). -## Maintainers +## Active maintainers <div class="card-group maintainers"> - {% for person in maintainers.people %} + {% for person in maintainers.active_with_github_info %} <div class="card"> <img class="card-img-top" alt="" src="{{ person.avatar_url }}"/> <div class="card-body"> @@ -60,10 +60,14 @@ The project governance is available in the [project governance page](governance. {% endfor %} </ul> -## Emeritus maintainers +## Inactive maintainers <ul> - {% for person in maintainers.emeritus %} - <li>{{ person }}</li> + {% for person in maintainers.inactive_with_github_info %} + <li> + <a href="{{ person.blog or person.html_url }}"> + {{ person.name or person.login }} + </a> + </li> {% endfor %} </ul> diff --git a/web/pandas/config.yml b/web/pandas/config.yml index aa4deaea98a6c..79a77e80830f3 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -68,13 +68,10 @@ maintainers: - wesm - jorisvandenbossche - TomAugspurger - - shoyer - jreback - - chris-b1 - sinhrks - cpcloud - gfyoung - - toobaz - WillAyd - mroeschke - jschendel @@ -93,10 +90,15 @@ maintainers: - attack68 - fangchenli - twoertwein - emeritus: - - Wouter Overmeire - - Skipper Seabold - - Jeff Tratner + - lithomas1 + - mzeitlin11 + inactive: + - lodagro + - jseabold + - jtratner + - shoyer + - chris-b1 + - toobaz coc: - Safia Abdalla - Tom Augspurger diff --git a/web/pandas_web.py b/web/pandas_web.py index 16e9024d8d1d8..290443d1d2970 100755 --- a/web/pandas_web.py +++ b/web/pandas_web.py @@ -148,13 +148,20 @@ def maintainers_add_info(context): Given the active maintainers defined in the yaml file, it fetches the GitHub user information for them. """ - context["maintainers"]["people"] = [] - for user in context["maintainers"]["active"]: - resp = requests.get(f"https://api.github.com/users/{user}") - if context["ignore_io_errors"] and resp.status_code == 403: - return context - resp.raise_for_status() - context["maintainers"]["people"].append(resp.json()) + repeated = set(context["maintainers"]["active"]) & set( + context["maintainers"]["inactive"] + ) + if repeated: + raise ValueError(f"Maintainers {repeated} are both active and inactive") + + for kind in ("active", "inactive"): + context["maintainers"][f"{kind}_with_github_info"] = [] + for user in context["maintainers"][kind]: + resp = requests.get(f"https://api.github.com/users/{user}") + if context["ignore_io_errors"] and resp.status_code == 403: + return context + resp.raise_for_status() + context["maintainers"][f"{kind}_with_github_info"].append(resp.json()) return context @staticmethod
- Adding @lithomas1 and @mzeitlin11 to the maintainers list on the website (sorry it wasn't done when you were promoted) - Moving @shoyer, @chris-b1 and @toobaz to inactive, renaming the section "Inactive maintainers", and using the same logic to display the names with links as we have for active maintainers
https://api.github.com/repos/pandas-dev/pandas/pulls/48053
2022-08-12T05:59:34Z
2022-08-12T17:59:44Z
2022-08-12T17:59:44Z
2022-08-12T17:59:51Z
REF: dont alter self in _validate_specification
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index cb392eee1d589..c84926deb314e 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -639,8 +639,6 @@ def __init__( self.axis = 1 - axis if self.left.ndim == 2 else 0 self.on = com.maybe_make_list(on) - self.left_on = com.maybe_make_list(left_on) - self.right_on = com.maybe_make_list(right_on) self.copy = copy self.suffixes = suffixes @@ -683,7 +681,7 @@ def __init__( msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) ) - self._validate_specification() + self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on) cross_col = None if self.how == "cross": @@ -1075,7 +1073,7 @@ def _get_merge_keys(self): # a pd.merge_asof(left_index=True, left_by=...) will result in a # self.left_on array with a None in the middle of it. This requires # a work-around as designated in the code below. - # See _validate_specification() for where this happens. + # See _validate_left_right_on() for where this happens. # ugh, spaghetti re #733 if _any(self.left_on) and _any(self.right_on): @@ -1323,25 +1321,27 @@ def _create_cross_configuration( cross_col, ) - def _validate_specification(self) -> None: + def _validate_left_right_on(self, left_on, right_on): + left_on = com.maybe_make_list(left_on) + right_on = com.maybe_make_list(right_on) + if self.how == "cross": if ( self.left_index or self.right_index - or self.right_on is not None - or self.left_on is not None + or right_on is not None + or left_on is not None or self.on is not None ): raise MergeError( "Can not pass on, right_on, left_on or set right_index=True or " "left_index=True" ) - return # Hm, any way to make this logic less complicated?? - elif self.on is None and self.left_on is None and self.right_on is None: + elif self.on is None and left_on is None and right_on is None: if self.left_index and self.right_index: - self.left_on, self.right_on = (), () + left_on, right_on = (), () elif self.left_index: raise MergeError("Must pass right_on or right_index=True") elif self.right_index: @@ -1354,8 +1354,8 @@ def _validate_specification(self) -> None: if len(common_cols) == 0: raise MergeError( "No common columns to perform merge on. " - f"Merge options: left_on={self.left_on}, " - f"right_on={self.right_on}, " + f"Merge options: left_on={left_on}, " + f"right_on={right_on}, " f"left_index={self.left_index}, " f"right_index={self.right_index}" ) @@ -1364,9 +1364,9 @@ def _validate_specification(self) -> None: or not right_cols.join(common_cols, how="inner").is_unique ): raise MergeError(f"Data columns not unique: {repr(common_cols)}") - self.left_on = self.right_on = common_cols + left_on = right_on = common_cols elif self.on is not None: - if self.left_on is not None or self.right_on is not None: + if left_on is not None or right_on is not None: raise MergeError( 'Can only pass argument "on" OR "left_on" ' 'and "right_on", not a combination of both.' @@ -1376,40 +1376,42 @@ def _validate_specification(self) -> None: 'Can only pass argument "on" OR "left_index" ' 'and "right_index", not a combination of both.' ) - self.left_on = self.right_on = self.on - elif self.left_on is not None: + left_on = right_on = self.on + elif left_on is not None: if self.left_index: raise MergeError( 'Can only pass argument "left_on" OR "left_index" not both.' 
) - if not self.right_index and self.right_on is None: + if not self.right_index and right_on is None: raise MergeError('Must pass "right_on" OR "right_index".') - n = len(self.left_on) + n = len(left_on) if self.right_index: - if len(self.left_on) != self.right.index.nlevels: + if len(left_on) != self.right.index.nlevels: raise ValueError( "len(left_on) must equal the number " 'of levels in the index of "right"' ) - self.right_on = [None] * n - elif self.right_on is not None: + right_on = [None] * n + elif right_on is not None: if self.right_index: raise MergeError( 'Can only pass argument "right_on" OR "right_index" not both.' ) - if not self.left_index and self.left_on is None: + if not self.left_index and left_on is None: raise MergeError('Must pass "left_on" OR "left_index".') - n = len(self.right_on) + n = len(right_on) if self.left_index: - if len(self.right_on) != self.left.index.nlevels: + if len(right_on) != self.left.index.nlevels: raise ValueError( "len(right_on) must equal the number " 'of levels in the index of "left"' ) - self.left_on = [None] * n - if self.how != "cross" and len(self.right_on) != len(self.left_on): + left_on = [None] * n + if self.how != "cross" and len(right_on) != len(left_on): raise ValueError("len(right_on) must equal len(left_on)") + return left_on, right_on + def _validate(self, validate: str) -> None: # Check uniqueness of each @@ -1767,14 +1769,14 @@ def __init__( fill_method=fill_method, ) - def _validate_specification(self) -> None: - super()._validate_specification() + def _validate_left_right_on(self, left_on, right_on): + left_on, right_on = super()._validate_left_right_on(left_on, right_on) # we only allow on to be a single item for on - if len(self.left_on) != 1 and not self.left_index: + if len(left_on) != 1 and not self.left_index: raise MergeError("can only asof on a key for left") - if len(self.right_on) != 1 and not self.right_index: + if len(right_on) != 1 and not self.right_index: raise MergeError("can only asof on a key for right") if self.left_index and isinstance(self.left.index, MultiIndex): @@ -1795,27 +1797,27 @@ def _validate_specification(self) -> None: # GH#29130 Check that merge keys do not have dtype object if not self.left_index: - left_on = self.left_on[0] - if is_array_like(left_on): - lo_dtype = left_on.dtype + left_on_0 = left_on[0] + if is_array_like(left_on_0): + lo_dtype = left_on_0.dtype else: lo_dtype = ( - self.left[left_on].dtype - if left_on in self.left.columns - else self.left.index.get_level_values(left_on) + self.left[left_on_0].dtype + if left_on_0 in self.left.columns + else self.left.index.get_level_values(left_on_0) ) else: lo_dtype = self.left.index.dtype if not self.right_index: - right_on = self.right_on[0] - if is_array_like(right_on): - ro_dtype = right_on.dtype + right_on_0 = right_on[0] + if is_array_like(right_on_0): + ro_dtype = right_on_0.dtype else: ro_dtype = ( - self.right[right_on].dtype - if right_on in self.right.columns - else self.right.index.get_level_values(right_on) + self.right[right_on_0].dtype + if right_on_0 in self.right.columns + else self.right.index.get_level_values(right_on_0) ) else: ro_dtype = self.right.index.dtype @@ -1837,13 +1839,15 @@ def _validate_specification(self) -> None: if len(self.left_by) != len(self.right_by): raise MergeError("left_by and right_by must be same length") - self.left_on = self.left_by + list(self.left_on) - self.right_on = self.right_by + list(self.right_on) + left_on = self.left_by + list(left_on) + right_on = self.right_by + list(right_on) # 
check 'direction' is valid if self.direction not in ["backward", "forward", "nearest"]: raise MergeError(f"direction invalid: {self.direction}") + return left_on, right_on + def _get_merge_keys(self): # note this function has side effects
Besides being better practice, this will make it easier to annotate. Done separately because I'm tracking down what appear to be incorrect annotations that need to be fixed before these methods can be annotated.
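For context, a minimal sketch of the pattern this refactor moves toward, with hypothetical class and helper names rather than the actual pandas internals: the validation helper returns its results, and `__init__` assigns them once, instead of the helper mutating attributes on `self` as a side effect.

```python
class _MergeOpLike:
    def __init__(self, left_on=None, right_on=None):
        # before: attributes were assigned first, then mutated by a
        # _validate_specification()-style method; after: assigned exactly once
        self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on)

    def _validate_left_right_on(self, left_on, right_on):
        # normalize scalars to lists locally instead of on self
        left_on = [left_on] if left_on is not None and not isinstance(left_on, list) else left_on
        right_on = [right_on] if right_on is not None and not isinstance(right_on, list) else right_on
        if left_on is not None and right_on is None:
            raise ValueError('Must pass "right_on" OR "right_index".')
        return left_on, right_on
```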
https://api.github.com/repos/pandas-dev/pandas/pulls/48051
2022-08-11T23:39:17Z
2022-08-15T16:08:15Z
2022-08-15T16:08:15Z
2022-08-15T16:14:04Z
ENH: allow user to infer SAS file encoding; add correct encoding names
diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index 405b8cc0a5ded..252a618150aff 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -28,6 +28,7 @@ enhancement2 Other enhancements ^^^^^^^^^^^^^^^^^^ +- :func:`read_sas` now supports using ``encoding='infer'`` to correctly read and use the encoding specified by the sas file. (:issue:`48048`) - :meth:`.DataFrameGroupBy.quantile` and :meth:`.SeriesGroupBy.quantile` now preserve nullable dtypes instead of casting to numpy dtypes (:issue:`37493`) - :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support an ``axis`` argument. If ``axis`` is set, the default behaviour of which axis to consider can be overwritten (:issue:`47819`) - :func:`assert_frame_equal` now shows the first element where the DataFrames differ, analogously to ``pytest``'s output (:issue:`47910`) diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index bdefb6f42d8bd..91c5e6b227c35 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -147,8 +147,10 @@ class SAS7BDATReader(ReaderBase, abc.Iterator): chunksize : int, defaults to None Return SAS7BDATReader object for iterations, returns chunks with given number of lines. - encoding : string, defaults to None - String encoding. + encoding : str, 'infer', defaults to None + String encoding acc. to python standard encodings, + encoding='infer' tries to detect the encoding from the file header, + encoding=None will leave the data in binary format. convert_text : bool, defaults to True If False, text variables are left as raw bytes. convert_header_text : bool, defaults to True @@ -265,9 +267,11 @@ def _get_properties(self) -> None: # Get encoding information buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] if buf in const.encoding_names: - self.file_encoding = const.encoding_names[buf] + self.inferred_encoding = const.encoding_names[buf] + if self.encoding == "infer": + self.encoding = self.inferred_encoding else: - self.file_encoding = f"unknown (code={buf})" + self.inferred_encoding = f"unknown (code={buf})" # Get platform information buf = self._read_bytes(const.platform_offset, const.platform_length) diff --git a/pandas/io/sas/sas_constants.py b/pandas/io/sas/sas_constants.py index 69bc16e6d294f..a090b8a1acb3c 100644 --- a/pandas/io/sas/sas_constants.py +++ b/pandas/io/sas/sas_constants.py @@ -107,15 +107,64 @@ compression_literals: Final = [rle_compression, rdc_compression] # Incomplete list of encodings, using SAS nomenclature: -# http://support.sas.com/documentation/cdl/en/nlsref/61893/HTML/default/viewer.htm#a002607278.htm +# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html +# corresponding to the Python documentation of standard encodings +# https://docs.python.org/3/library/codecs.html#standard-encodings encoding_names: Final = { - 29: "latin1", 20: "utf-8", + 29: "latin1", + 30: "latin2", + 31: "latin3", + 32: "latin4", 33: "cyrillic", - 60: "wlatin2", - 61: "wcyrillic", - 62: "wlatin1", - 90: "ebcdic870", + 34: "arabic", + 35: "greek", + 36: "hebrew", + 37: "latin5", + 38: "latin6", + 39: "cp874", + 40: "latin9", + 41: "cp437", + 42: "cp850", + 43: "cp852", + 44: "cp857", + 45: "cp858", + 46: "cp862", + 47: "cp864", + 48: "cp865", + 49: "cp866", + 50: "cp869", + 51: "cp874", + # 52: "", # not found + # 53: "", # not found + # 54: "", # not found + 55: "cp720", + 56: "cp737", + 57: 
"cp775", + 58: "cp860", + 59: "cp863", + 60: "cp1250", + 61: "cp1251", + 62: "cp1252", + 63: "cp1253", + 64: "cp1254", + 65: "cp1255", + 66: "cp1256", + 67: "cp1257", + 68: "cp1258", + 118: "cp950", + # 119: "", # not found + 123: "big5", + 125: "gb2312", + 126: "cp936", + 134: "euc_jp", + 136: "cp932", + 138: "shift_jis", + 140: "euc-kr", + 141: "cp949", + 227: "latin8", + # 228: "", # not found + # 229: "" # not found } diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 2b7ecbcdf9f80..cee416ac218de 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -136,6 +136,21 @@ def test_encoding_options(datapath): assert x == y.decode() +def test_encoding_infer(datapath): + fname = datapath("io", "sas", "data", "test1.sas7bdat") + + with pd.read_sas(fname, encoding="infer", iterator=True) as df1_reader: + # check: is encoding inferred correctly from file + assert df1_reader.inferred_encoding == "cp1252" + df1 = df1_reader.read() + + with pd.read_sas(fname, encoding="cp1252", iterator=True) as df2_reader: + df2 = df2_reader.read() + + # check: reader reads correct information + tm.assert_frame_equal(df1, df2) + + def test_productsales(datapath): fname = datapath("io", "sas", "data", "productsales.sas7bdat") df = pd.read_sas(fname, encoding="utf-8")
- [x] closes #48048 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
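A short usage sketch mirroring the test added in this diff; the file name is the test fixture used there, and `inferred_encoding` is the attribute this PR introduces on the reader.

```python
import pandas as pd

# encoding="infer" makes the reader decode text with the encoding recorded
# in the SAS file header, which it also exposes as `inferred_encoding`.
with pd.read_sas("test1.sas7bdat", encoding="infer", iterator=True) as reader:
    print(reader.inferred_encoding)  # "cp1252" for this test file
    df = reader.read()
```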
https://api.github.com/repos/pandas-dev/pandas/pulls/48050
2022-08-11T22:32:14Z
2022-09-19T23:21:57Z
2022-09-19T23:21:57Z
2022-10-13T16:59:56Z
CI: Pin setuptools
diff --git a/.circleci/setup_env.sh b/.circleci/setup_env.sh index c03a7ff4be8b3..42200c31ec28a 100755 --- a/.circleci/setup_env.sh +++ b/.circleci/setup_env.sh @@ -51,7 +51,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false -conda install -y -c conda-forge -n base 'mamba>=0.21.2' pip setuptools +conda install -y -c conda-forge -n base 'mamba>=0.21.2' pip 'setuptools<64.0.0' echo "conda info -a" conda info -a diff --git a/.github/workflows/python-dev.yml b/.github/workflows/python-dev.yml index d93b92a9662ec..1d6a386efbff8 100644 --- a/.github/workflows/python-dev.yml +++ b/.github/workflows/python-dev.yml @@ -60,7 +60,7 @@ jobs: - name: Install dependencies shell: bash -el {0} run: | - python3 -m pip install --upgrade pip setuptools wheel + python3 -m pip install --upgrade pip "setuptools<64.0.0" wheel python3 -m pip install -i https://pypi.anaconda.org/scipy-wheels-nightly/simple numpy python3 -m pip install git+https://github.com/nedbat/coveragepy.git python3 -m pip install cython python-dateutil pytz hypothesis pytest>=6.2.5 pytest-xdist pytest-cov pytest-asyncio>=0.17 diff --git a/.github/workflows/sdist.yml b/.github/workflows/sdist.yml index 1a06ea31ccbb8..2db1905cb6c7c 100644 --- a/.github/workflows/sdist.yml +++ b/.github/workflows/sdist.yml @@ -46,7 +46,7 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools wheel + python -m pip install --upgrade pip "setuptools<64.0.0" wheel # GH 39416 pip install numpy diff --git a/environment.yml b/environment.yml index 7b4c537c0bcd9..dde35288a5f90 100644 --- a/environment.yml +++ b/environment.yml @@ -4,6 +4,7 @@ channels: - conda-forge dependencies: - python=3.8 + - setuptools>=51.0.0, <64.0.0 # test dependencies - cython=0.29.32 diff --git a/pyproject.toml b/pyproject.toml index 67c56123a847c..7928570cc3dd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ # Minimum requirements for the build system to execute. # See https://github.com/scipy/scipy/pull/12940 for the AIX issue. requires = [ - "setuptools>=51.0.0", + "setuptools>=51.0.0, <64.0.0", "wheel", "Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json "oldest-supported-numpy>=0.10" diff --git a/requirements-dev.txt b/requirements-dev.txt index 7c7271bb2d8b7..6b53e631f67ee 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. # See that file for comments about the need/usage of each dependency. +setuptools>=51.0.0, <64.0.0 cython==0.29.32 pytest>=6.0 pytest-cov @@ -100,4 +101,4 @@ requests jupyterlab >=3.4,<4 jupyterlite==0.1.0b10 sphinx-toggleprompt -setuptools>=51.0.0 +setuptools>=51.0.0, <64.0.0
- [ ] xref #48046
https://api.github.com/repos/pandas-dev/pandas/pulls/48047
2022-08-11T21:42:02Z
2022-08-12T15:01:48Z
null
2022-08-12T17:28:45Z
Nanoseconds added
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 6a17a56a47cbc..5cac852b40496 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -611,3 +611,11 @@ def test_setitem_scalar_with_mask_validation(dtype): msg = "Scalar must be NA or str" with pytest.raises(ValueError, match=msg): ser[mask] = 1 + + +def test_consistency_inplace(): + df = pd.DataFrame({"M": [""]}, dtype="string") + df2 = pd.DataFrame({"M": [""]}, dtype="string") + df2.where(df2 != "", np.nan, inplace=True) + df = df.where(df != "", np.nan) + tm.assert_frame_equal(df, df2)
null
https://api.github.com/repos/pandas-dev/pandas/pulls/48045
2022-08-11T21:11:10Z
2022-08-11T21:54:40Z
null
2022-08-18T23:36:19Z
REGR: groupby sum causing overflow for int8
diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index 3ec37718eb652..814cd223eb445 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -51,7 +51,7 @@ def group_any_all( skipna: bool, ) -> None: ... def group_sum( - out: np.ndarray, # complexfloatingintuint_t[:, ::1] + out: np.ndarray, # complexfloatingint64uint64_t[:, ::1] counts: np.ndarray, # int64_t[::1] values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2] labels: np.ndarray, # const intp_t[:] diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e4314edecfa7e..f24e0d8faffdc 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -524,11 +524,16 @@ ctypedef fused sum_t: uint64_t object +ctypedef fused sum_out_t: + mean_t + int64_t + uint64_t + object @cython.wraparound(False) @cython.boundscheck(False) def group_sum( - sum_t[:, ::1] out, + sum_out_t[:, ::1] out, int64_t[::1] counts, ndarray[sum_t, ndim=2] values, const intp_t[::1] labels, @@ -542,55 +547,54 @@ def group_sum( """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - sum_t val, t, y - sum_t[:, ::1] sumx, compensation + sum_out_t val, t, y + sum_out_t[:, ::1] sumx, compensation int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) bint uses_mask = mask is not None bint isna_entry - if len_values != len_labels: - raise ValueError("len(index) != len(labels)") + if (sum_out_t is float32_t and not sum_t is float32_t or + sum_t is float32_t and not sum_out_t is float32_t): + raise NotImplementedError # pragma: no cover - nobs = np.zeros((<object>out).shape, dtype=np.int64) - # the below is equivalent to `np.zeros_like(out)` but faster - sumx = np.zeros((<object>out).shape, dtype=(<object>out).base.dtype) - compensation = np.zeros((<object>out).shape, dtype=(<object>out).base.dtype) + elif (sum_out_t is float64_t and not sum_t is float64_t or + sum_t is float64_t and not sum_out_t is float64_t): + raise NotImplementedError # pragma: no cover - N, K = (<object>values).shape + elif (sum_out_t is complex64_t and not sum_t is complex64_t or + sum_t is complex64_t and not sum_out_t is complex64_t): + raise NotImplementedError # pragma: no cover - if sum_t is object: - # NB: this does not use 'compensation' like the non-object track does. - for i in range(N): - lab = labels[i] - if lab < 0: - continue + elif (sum_out_t is complex128_t and not sum_t is complex128_t or + sum_t is complex128_t and not sum_out_t is complex128_t): + raise NotImplementedError # pragma: no cover - counts[lab] += 1 - for j in range(K): - val = values[i, j] + elif (sum_out_t is object and not sum_t is object or + sum_t is object and not sum_out_t is object): + raise NotImplementedError # pragma: no cover - # not nan - if not checknull(val): - nobs[lab, j] += 1 + elif (sum_out_t is uint64_t and ( + sum_t is int8_t or sum_t is int16_t or sum_t is int32_t or sum_t is int64_t) + or sum_out_t is int64_t and ( + sum_t is uint8_t or sum_t is uint16_t or sum_t is uint32_t + or sum_t is uint64_t)): + raise NotImplementedError # pragma: no cover - if nobs[lab, j] == 1: - # i.e. we haven't added anything yet; avoid TypeError - # if e.g. 
val is a str and sumx[lab, j] is 0 - t = val - else: - t = sumx[lab, j] + val - sumx[lab, j] = t + else: - for i in range(ncounts): - for j in range(K): - if nobs[i, j] < min_count: - out[i, j] = None + if len_values != len_labels: + raise ValueError("len(index) != len(labels)") - else: - out[i, j] = sumx[i, j] - else: - with nogil: + nobs = np.zeros((<object>out).shape, dtype=np.int64) + # the below is equivalent to `np.zeros_like(out)` but faster + sumx = np.zeros((<object>out).shape, dtype=(<object>out).base.dtype) + compensation = np.zeros((<object>out).shape, dtype=(<object>out).base.dtype) + + N, K = (<object>values).shape + + if sum_t is object: + # NB: this does not use 'compensation' like the non-object track does. for i in range(N): lab = labels[i] if lab < 0: @@ -601,49 +605,79 @@ def group_sum( val = values[i, j] # not nan - # With dt64/td64 values, values have been cast to float64 - # instead if int64 for group_sum, but the logic - # is otherwise the same as in _treat_as_na - if uses_mask: - isna_entry = mask[i, j] - elif (sum_t is float32_t or sum_t is float64_t - or sum_t is complex64_t or sum_t is complex64_t): - # avoid warnings because of equality comparison - isna_entry = not val == val - elif sum_t is int64_t and is_datetimelike and val == NPY_NAT: - isna_entry = True - else: - isna_entry = False - - if not isna_entry: + if not checknull(val): nobs[lab, j] += 1 - y = val - compensation[lab, j] - t = sumx[lab, j] + y - compensation[lab, j] = t - sumx[lab, j] - y + + if nobs[lab, j] == 1: + # i.e. we haven't added anything yet; avoid TypeError + # if e.g. val is a str and sumx[lab, j] is 0 + t = val + else: + t = sumx[lab, j] + val sumx[lab, j] = t for i in range(ncounts): for j in range(K): if nobs[i, j] < min_count: - # if we are integer dtype, not is_datetimelike, and - # not uses_mask, then getting here implies that - # counts[i] < min_count, which means we will - # be cast to float64 and masked at the end - # of WrappedCythonOp._call_cython_op. So we can safely - # set a placeholder value in out[i, j]. + out[i, j] = None + + else: + out[i, j] = sumx[i, j] + else: + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + # With dt64/td64 values, values have been cast to float64 + # instead if int64 for group_sum, but the logic + # is otherwise the same as in _treat_as_na if uses_mask: - result_mask[i, j] = True + isna_entry = mask[i, j] elif (sum_t is float32_t or sum_t is float64_t or sum_t is complex64_t or sum_t is complex64_t): - out[i, j] = NAN - elif sum_t is int64_t: - out[i, j] = NPY_NAT + # avoid warnings because of equality comparison + isna_entry = not val == val + elif sum_t is int64_t and is_datetimelike and val == NPY_NAT: + isna_entry = True else: - # placeholder, see above - out[i, j] = 0 + isna_entry = False + + if not isna_entry: + nobs[lab, j] += 1 + y = val - compensation[lab, j] + t = sumx[lab, j] + y + compensation[lab, j] = t - sumx[lab, j] - y + sumx[lab, j] = t + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] < min_count: + # if we are integer dtype, not is_datetimelike, and + # not uses_mask, then getting here implies that + # counts[i] < min_count, which means we will + # be cast to float64 and masked at the end + # of WrappedCythonOp._call_cython_op. So we can safely + # set a placeholder value in out[i, j]. 
+ if uses_mask: + result_mask[i, j] = True + elif (sum_t is float32_t or sum_t is float64_t + or sum_t is complex64_t or sum_t is complex64_t): + out[i, j] = NAN + elif sum_t is int64_t: + out[i, j] = NPY_NAT + else: + # placeholder, see above + out[i, j] = 0 - else: - out[i, j] = sumx[i, j] + else: + out[i, j] = sumx[i, j] @cython.wraparound(False) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 7617ca5074c9c..167ca65ff4d73 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -183,7 +183,10 @@ def _get_cython_function( f"function is not implemented for this dtype: " f"[how->{how},dtype->{dtype_str}]" ) - elif "object" not in f.__signatures__: + elif ( + "object" not in f.__signatures__ + and "object|object" not in f.__signatures__ + ): # raise NotImplementedError here rather than TypeError later raise NotImplementedError( f"function is not implemented for this dtype: " @@ -293,6 +296,8 @@ def _get_out_dtype(self, dtype: np.dtype) -> np.dtype: if how == "rank": out_dtype = "float64" + elif how == "sum" and is_integer_dtype(dtype): + out_dtype = f"{dtype.kind}8" else: if is_numeric_dtype(dtype): out_dtype = f"{dtype.kind}{dtype.itemsize}" diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index a7c5b85e365ae..ade2b1ebbecfe 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2829,3 +2829,11 @@ def test_groupby_sum_support_mask(any_numeric_ea_dtype): dtype=any_numeric_ea_dtype, ) tm.assert_frame_equal(result, expected) + + +def test_groupby_sum_int8_overflow(): + # GH#37493 + df = DataFrame({"a": [1, 2, 2], "b": [125, 111, 111]}, dtype="int8") + result = df.groupby("a").sum() + expected = DataFrame({"b": [125, 222]}, index=Index([1, 2], name="a")) + tm.assert_frame_equal(result, expected)
This fixes the regression from #48018. BUT: handling this at the Cython level like this is really ugly and will blow up our wheel size if we introduce it everywhere, and I could not find a way to restrict the compile combinations. If there is no better way of handling this at the Cython level, I would propose casting values to int64/uint64 before calling the group_sum function. That would introduce an unnecessary cast and reduce performance, which is why I was looking for a solution at the Cython level. cc @jorisvandenbossche cc @mroeschke
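For reference, the regression being fixed can be reproduced from the test added in this diff: with an 8-bit accumulator the sum for group 2 wraps around, while a 64-bit accumulator gives the correct result.

```python
import pandas as pd

# GH#37493: summing int8 values should not overflow the int8 range.
df = pd.DataFrame({"a": [1, 2, 2], "b": [125, 111, 111]}, dtype="int8")
print(df.groupby("a").sum())  # expected b: [125, 222], not a wrapped-around value
```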
https://api.github.com/repos/pandas-dev/pandas/pulls/48044
2022-08-11T21:06:45Z
2022-08-12T19:09:49Z
null
2022-08-17T20:02:50Z
ENH: set_index copy kwd
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 6bf0f288645b7..599ef6150168a 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -293,6 +293,7 @@ Other enhancements - :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`) - :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`) - :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`) +- :meth:`DataFrame.set_index` now supports a ``copy`` keyword. If ``False``, the underlying data is not copied when a new :class:`DataFrame` is returned (:issue:`48043`) .. --------------------------------------------------------------------------- .. _whatsnew_150.notable_bug_fixes: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0a7a6494d04eb..8c4924a2483be 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5824,6 +5824,7 @@ def set_index( append: bool = ..., inplace: Literal[False] = ..., verify_integrity: bool = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame: ... @@ -5836,6 +5837,7 @@ def set_index( append: bool = ..., inplace: Literal[True], verify_integrity: bool = ..., + copy: bool | lib.NoDefault = ..., ) -> None: ... @@ -5847,6 +5849,7 @@ def set_index( append: bool = False, inplace: bool = False, verify_integrity: bool = False, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame | None: """ Set the DataFrame index using existing columns. @@ -5873,6 +5876,11 @@ def set_index( Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. + copy : bool, default True + Whether to make a copy of the underlying data when returning a new + DataFrame. + + .. 
versionadded:: 1.5.0 Returns ------- @@ -5938,6 +5946,13 @@ def set_index( 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if copy is not lib.no_default: + raise ValueError("Cannot specify copy when inplace=True") + copy = False + elif copy is lib.no_default: + copy = True + self._check_inplace_and_allows_duplicate_labels(inplace) if not isinstance(keys, list): keys = [keys] @@ -5973,7 +5988,7 @@ def set_index( if inplace: frame = self else: - frame = self.copy() + frame = self.copy(deep=copy) arrays = [] names: list[Hashable] = [] diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 4c39cf99f18ff..9392d3c146942 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -25,6 +25,25 @@ class TestSetIndex: + def test_set_index_copy(self): + # GH#48043 + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + expected = DataFrame({"B": [3, 4], "C": [5, 6]}, index=Index([1, 2], name="A")) + + res = df.set_index("A", copy=True) + tm.assert_frame_equal(res, expected) + assert not any(tm.shares_memory(df[col], res[col]) for col in res.columns) + + res = df.set_index("A", copy=False) + tm.assert_frame_equal(res, expected) + assert all(tm.shares_memory(df[col], res[col]) for col in res.columns) + + msg = "Cannot specify copy when inplace=True" + with pytest.raises(ValueError, match=msg): + df.set_index("A", inplace=True, copy=True) + with pytest.raises(ValueError, match=msg): + df.set_index("A", inplace=True, copy=False) + def test_set_index_multiindex(self): # segfault in GH#3308 d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]}
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
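A usage sketch based on the docstring and tests added in this diff; the `copy` keyword on `set_index` exists only as of this PR.

```python
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})

# copy=False returns a new frame whose columns share memory with `df`
res = df.set_index("A", copy=False)

# combining inplace=True with an explicit copy raises, per this PR:
# df.set_index("A", inplace=True, copy=False)  # ValueError
```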
https://api.github.com/repos/pandas-dev/pandas/pulls/48043
2022-08-11T20:43:58Z
2022-08-12T18:17:09Z
2022-08-12T18:17:09Z
2022-08-13T17:59:20Z
TST: GH39984 Addition to tests
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 7fad59fc6654c..4dc3c5a4ae8b9 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from pandas.compat import pa_version_under1p01 + from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike import pandas as pd @@ -648,6 +650,28 @@ def test_from_frame(): tm.assert_index_equal(expected, result) [email protected](pa_version_under1p01, reason="Import Problem") +def test_from_frame_missing_values_multiIndex(): + # GH 39984 + import pyarrow as pa + + df = pd.DataFrame( + { + "a": Series([1, 2, None], dtype="Int64"), + "b": pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + } + ) + multi_indexed = MultiIndex.from_frame(df) + expected = MultiIndex.from_arrays( + [ + Series([1, 2, None]).astype("Int64"), + pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + ], + names=["a", "b"], + ) + tm.assert_index_equal(multi_indexed, expected) + + @pytest.mark.parametrize( "non_frame", [
- [x] closes #39984 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
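A simplified sketch of the scenario the new test covers; the `Float64` column here is built with `pd.array` instead of the test's pyarrow construction, which I'm assuming is behaviorally equivalent for this purpose.

```python
import numpy as np
import pandas as pd

# GH 39984: MultiIndex.from_frame should preserve missing values
# in nullable-dtype columns rather than mangling them.
df = pd.DataFrame(
    {
        "a": pd.Series([1, 2, None], dtype="Int64"),
        "b": pd.array([0.2, np.nan, None], dtype="Float64"),
    }
)
mi = pd.MultiIndex.from_frame(df)
```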
https://api.github.com/repos/pandas-dev/pandas/pulls/48042
2022-08-11T20:41:51Z
2022-08-26T19:02:55Z
2022-08-26T19:02:55Z
2022-10-13T16:59:56Z
CI: Include FutureWarning from numpy in npdev build
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index a759280c74521..c4398efb12c3d 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -64,7 +64,7 @@ jobs: env_file: actions-310-numpydev.yaml pattern: "not slow and not network and not single_cpu" pandas_testing_mode: "deprecate" - test_args: "-W error::DeprecationWarning:numpy" + test_args: "-W error::DeprecationWarning:numpy -W error::FutureWarning:numpy" exclude: - env_file: actions-39.yaml pyarrow_version: "6"
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature xref https://github.com/pandas-dev/pandas/pull/46767#discussion_r849950834 cc @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/48041
2022-08-11T19:22:13Z
2022-08-17T01:59:32Z
2022-08-17T01:59:32Z
2022-08-17T02:12:07Z
REF: make indicator_name a cache_readonly
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index cb392eee1d589..659cb1d2f6838 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -35,6 +35,7 @@ from pandas.util._decorators import ( Appender, Substitution, + cache_readonly, ) from pandas.util._exceptions import find_stack_level @@ -651,16 +652,6 @@ def __init__( self.indicator = indicator - self.indicator_name: str | None - if isinstance(self.indicator, str): - self.indicator_name = self.indicator - elif isinstance(self.indicator, bool): - self.indicator_name = "_merge" if self.indicator else None - else: - raise ValueError( - "indicator option can only accept boolean or string arguments" - ) - if not is_bool(left_index): raise ValueError( f"left_index parameter must be of type bool, not {type(left_index)}" @@ -753,6 +744,17 @@ def _maybe_drop_cross_column( if cross_col is not None: del result[cross_col] + @cache_readonly + def _indicator_name(self) -> str | None: + if isinstance(self.indicator, str): + return self.indicator + elif isinstance(self.indicator, bool): + return "_merge" if self.indicator else None + else: + raise ValueError( + "indicator option can only accept boolean or string arguments" + ) + def _indicator_pre_merge( self, left: DataFrame, right: DataFrame ) -> tuple[DataFrame, DataFrame]: @@ -765,7 +767,7 @@ def _indicator_pre_merge( "Cannot use `indicator=True` option when " f"data contains a column named {i}" ) - if self.indicator_name in columns: + if self._indicator_name in columns: raise ValueError( "Cannot use name of an existing column for indicator column" ) @@ -786,13 +788,13 @@ def _indicator_post_merge(self, result: DataFrame) -> DataFrame: result["_left_indicator"] = result["_left_indicator"].fillna(0) result["_right_indicator"] = result["_right_indicator"].fillna(0) - result[self.indicator_name] = Categorical( + result[self._indicator_name] = Categorical( (result["_left_indicator"] + result["_right_indicator"]), categories=[1, 2, 3], ) - result[self.indicator_name] = result[self.indicator_name].cat.rename_categories( - ["left_only", "right_only", "both"] - ) + result[self._indicator_name] = result[ + self._indicator_name + ].cat.rename_categories(["left_only", "right_only", "both"]) result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) return result
Too much going on in `__init__` at the moment; this moves the `indicator_name` validation out into a lazily evaluated `cache_readonly` property.
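For illustration, a minimal sketch of the lazy-caching pattern, using the stdlib `functools.cached_property` as an analogue of pandas' internal `cache_readonly` decorator; the class name is hypothetical.

```python
from functools import cached_property  # stdlib analogue of pandas' cache_readonly

class _OpLike:
    def __init__(self, indicator=False):
        # __init__ just stores the raw option; validation is deferred
        self.indicator = indicator

    @cached_property
    def _indicator_name(self):
        # computed on first access, then cached on the instance
        if isinstance(self.indicator, str):
            return self.indicator
        if isinstance(self.indicator, bool):
            return "_merge" if self.indicator else None
        raise ValueError("indicator option can only accept boolean or string arguments")
```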
https://api.github.com/repos/pandas-dev/pandas/pulls/48040
2022-08-11T18:20:08Z
2022-08-12T18:26:17Z
2022-08-12T18:26:17Z
2022-08-12T18:42:04Z
CLN: Remove mangle_dupe_cols argument
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index d794692de5005..c0cc985a42b87 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -548,6 +548,7 @@ Other API changes - Operations with :class:`Timestamp` or :class:`Timedelta` that would previously raise ``OverflowError`` instead raise ``OutOfBoundsDatetime`` or ``OutOfBoundsTimedelta`` where appropriate (:issue:`47268`) - When :func:`read_sas` previously returned ``None``, it now returns an empty :class:`DataFrame` (:issue:`47410`) - :class:`DataFrame` constructor raises if ``index`` or ``columns`` arguments are sets (:issue:`47215`) +- Removed ``mangle_dupe_cols`` argument from :func:`read_csv`, :func:`read_fwf` and :func:`read_excel`. The argument was never fully implemented and only supported value ``True`` (:issue:`47718`) .. --------------------------------------------------------------------------- .. _whatsnew_150.deprecations: diff --git a/pandas/_libs/parsers.pyi b/pandas/_libs/parsers.pyi index 6b0bbf183f07e..89f0580fec965 100644 --- a/pandas/_libs/parsers.pyi +++ b/pandas/_libs/parsers.pyi @@ -58,7 +58,6 @@ class TextReader: skiprows=..., skipfooter: int = ..., # int64_t verbose: bool = ..., - mangle_dupe_cols: bool = ..., float_precision: Literal["round_trip", "legacy", "high"] | None = ..., skip_blank_lines: bool = ..., encoding_errors: bytes | str = ..., diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index b07fa143c98b6..cfeca8d3615be 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -312,7 +312,7 @@ cdef class TextReader: object handle object orig_header bint na_filter, keep_default_na, verbose, has_usecols, has_mi_columns - bint mangle_dupe_cols, allow_leading_cols + bint allow_leading_cols uint64_t parser_start # this is modified after __init__ list clocks const char *encoding_errors @@ -367,7 +367,6 @@ cdef class TextReader: skiprows=None, skipfooter=0, # int64_t bint verbose=False, - bint mangle_dupe_cols=True, float_precision=None, bint skip_blank_lines=True, encoding_errors=b"strict"): @@ -383,8 +382,6 @@ cdef class TextReader: self.parser = parser_new() self.parser.chunksize = tokenize_chunksize - self.mangle_dupe_cols = mangle_dupe_cols - # For timekeeping self.clocks = [] @@ -672,7 +669,7 @@ cdef class TextReader: this_header.append(name) - if not self.has_mi_columns and self.mangle_dupe_cols: + if not self.has_mi_columns: # Ensure that regular columns are used before unnamed ones # to keep given names and mangle unnamed columns col_loop_order = [i for i in range(len(this_header)) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 44152f100d390..5310279256066 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -275,10 +275,6 @@ .. deprecated:: 1.3.0 convert_float will be removed in a future version -mangle_dupe_cols : bool, default True - Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than - 'X'...'X'. Passing in False will cause data to be overwritten if there - are duplicate names in the columns. {storage_options} .. versionadded:: 1.2.0 @@ -386,7 +382,6 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., convert_float: bool | None = ..., - mangle_dupe_cols: bool = ..., storage_options: StorageOptions = ..., ) -> DataFrame: ... 
@@ -425,7 +420,6 @@ def read_excel( comment: str | None = ..., skipfooter: int = ..., convert_float: bool | None = ..., - mangle_dupe_cols: bool = ..., storage_options: StorageOptions = ..., ) -> dict[IntStrT, DataFrame]: ... @@ -465,7 +459,6 @@ def read_excel( comment: str | None = None, skipfooter: int = 0, convert_float: bool | None = None, - mangle_dupe_cols: bool = True, storage_options: StorageOptions = None, ) -> DataFrame | dict[IntStrT, DataFrame]: @@ -504,7 +497,6 @@ def read_excel( comment=comment, skipfooter=skipfooter, convert_float=convert_float, - mangle_dupe_cols=mangle_dupe_cols, ) finally: # make sure to close opened file handles @@ -709,7 +701,6 @@ def parse( comment: str | None = None, skipfooter: int = 0, convert_float: bool | None = None, - mangle_dupe_cols: bool = True, **kwds, ): @@ -877,7 +868,6 @@ def parse( comment=comment, skipfooter=skipfooter, usecols=usecols, - mangle_dupe_cols=mangle_dupe_cols, **kwds, ) @@ -1686,7 +1676,6 @@ def parse( comment: str | None = None, skipfooter: int = 0, convert_float: bool | None = None, - mangle_dupe_cols: bool = True, **kwds, ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]: """ @@ -1719,7 +1708,6 @@ def parse( comment=comment, skipfooter=skipfooter, convert_float=convert_float, - mangle_dupe_cols=mangle_dupe_cols, **kwds, ) diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 0e40e47bf7cb1..2a596ad2b549a 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -112,7 +112,6 @@ def __init__(self, kwds) -> None: self.true_values = kwds.get("true_values") self.false_values = kwds.get("false_values") - self.mangle_dupe_cols = kwds.get("mangle_dupe_cols", True) self.infer_datetime_format = kwds.pop("infer_datetime_format", False) self.cache_dates = kwds.pop("cache_dates", True) @@ -325,33 +324,32 @@ def extract(r): return names, index_names, col_names, passed_names @final - def _maybe_dedup_names(self, names: Sequence[Hashable]) -> Sequence[Hashable]: + def _dedup_names(self, names: Sequence[Hashable]) -> Sequence[Hashable]: # see gh-7160 and gh-9424: this helps to provide # immediate alleviation of the duplicate names # issue and appears to be satisfactory to users, # but ultimately, not needing to butcher the names # would be nice! 
- if self.mangle_dupe_cols: - names = list(names) # so we can index - counts: DefaultDict[Hashable, int] = defaultdict(int) - is_potential_mi = _is_potential_multi_index(names, self.index_col) + names = list(names) # so we can index + counts: DefaultDict[Hashable, int] = defaultdict(int) + is_potential_mi = _is_potential_multi_index(names, self.index_col) - for i, col in enumerate(names): - cur_count = counts[col] + for i, col in enumerate(names): + cur_count = counts[col] - while cur_count > 0: - counts[col] = cur_count + 1 + while cur_count > 0: + counts[col] = cur_count + 1 - if is_potential_mi: - # for mypy - assert isinstance(col, tuple) - col = col[:-1] + (f"{col[-1]}.{cur_count}",) - else: - col = f"{col}.{cur_count}" - cur_count = counts[col] + if is_potential_mi: + # for mypy + assert isinstance(col, tuple) + col = col[:-1] + (f"{col[-1]}.{cur_count}",) + else: + col = f"{col}.{cur_count}" + cur_count = counts[col] - names[i] = col - counts[col] = cur_count + 1 + names[i] = col + counts[col] = cur_count + 1 return names @@ -1135,7 +1133,6 @@ def converter(*date_cols): "encoding": None, "squeeze": None, "compression": None, - "mangle_dupe_cols": True, "infer_datetime_format": False, "skip_blank_lines": True, "encoding_errors": "strict", diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index aec999e40b0f5..773d9cc162aa6 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -248,7 +248,7 @@ def read( except StopIteration: if self._first_chunk: self._first_chunk = False - names = self._maybe_dedup_names(self.orig_names) + names = self._dedup_names(self.orig_names) index, columns, col_dict = self._get_empty_meta( names, self.index_col, @@ -295,7 +295,7 @@ def read( if self.usecols is not None: names = self._filter_usecols(names) - names = self._maybe_dedup_names(names) + names = self._dedup_names(names) # rename dict keys data_tups = sorted(data.items()) @@ -317,7 +317,7 @@ def read( # assert for mypy, orig_names is List or None, None would error in list(...) 
assert self.orig_names is not None names = list(self.orig_names) - names = self._maybe_dedup_names(names) + names = self._dedup_names(names) if self.usecols is not None: names = self._filter_usecols(names) diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 7c03a81dbc0e6..af9c0b069e1d4 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -261,7 +261,7 @@ def read( columns: Sequence[Hashable] = list(self.orig_names) if not len(content): # pragma: no cover # DataFrame with the right metadata, even though it's length 0 - names = self._maybe_dedup_names(self.orig_names) + names = self._dedup_names(self.orig_names) # error: Cannot determine type of 'index_col' index, columns, col_dict = self._get_empty_meta( names, @@ -295,7 +295,7 @@ def _exclude_implicit_index( self, alldata: list[np.ndarray], ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: - names = self._maybe_dedup_names(self.orig_names) + names = self._dedup_names(self.orig_names) offset = 0 if self._implicit_index: @@ -426,7 +426,7 @@ def _infer_columns( else: this_columns.append(c) - if not have_mi_columns and self.mangle_dupe_cols: + if not have_mi_columns: counts: DefaultDict = defaultdict(int) # Ensure that regular columns are used before unnamed ones # to keep given names and mangle unnamed columns diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index dc4556542d8e2..b2dcd6fdc091f 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -158,10 +158,6 @@ .. deprecated:: 1.4.0 Use a list comprehension on the DataFrame's columns after calling ``read_csv``. -mangle_dupe_cols : bool, default True - Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than - 'X'...'X'. Passing in False will cause data to be overwritten if there - are duplicate names in the columns. dtype : Type name or dict of column -> type, optional Data type for data or columns. E.g. 
{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}} @@ -618,7 +614,6 @@ def read_csv( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -678,7 +673,6 @@ def read_csv( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -738,7 +732,6 @@ def read_csv( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -798,7 +791,6 @@ def read_csv( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -867,7 +859,6 @@ def read_csv( usecols=None, squeeze: bool | None = None, prefix: str | lib.NoDefault = lib.no_default, - mangle_dupe_cols: bool = True, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, @@ -956,7 +947,6 @@ def read_table( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -1016,7 +1006,6 @@ def read_table( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -1076,7 +1065,6 @@ def read_table( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -1136,7 +1124,6 @@ def read_table( usecols=..., squeeze: bool | None = ..., prefix: str | lib.NoDefault = ..., - mangle_dupe_cols: bool = ..., dtype: DtypeArg | None = ..., engine: CSVEngine | None = ..., converters=..., @@ -1205,7 +1192,6 @@ def read_table( usecols=None, squeeze: bool | None = None, prefix: str | lib.NoDefault = lib.no_default, - mangle_dupe_cols: bool = True, # General Parsing Configuration dtype: DtypeArg | None = None, engine: CSVEngine | None = None, @@ -1468,9 +1454,6 @@ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: f"The {repr(argname)} option is not supported with the " f"'pyarrow' engine" ) - elif argname == "mangle_dupe_cols" and value is False: - # GH12935 - raise ValueError("Setting mangle_dupe_cols=False is not supported yet") else: options[argname] = value diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py index ba6366b71d854..9708294ec1e68 100644 --- a/pandas/tests/io/excel/test_writers.py +++ b/pandas/tests/io/excel/test_writers.py @@ -975,12 +975,6 @@ def test_duplicated_columns(self, path): result = pd.read_excel(path, sheet_name="test1", index_col=0) tm.assert_frame_equal(result, expected) - # Explicitly, we pass in the parameter. 
- result = pd.read_excel( - path, sheet_name="test1", index_col=0, mangle_dupe_cols=True - ) - tm.assert_frame_equal(result, expected) - # see gh-11007, gh-10970 df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"]) df.to_excel(path, "test1") @@ -998,10 +992,6 @@ def test_duplicated_columns(self, path): expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) tm.assert_frame_equal(result, expected) - msg = "Setting mangle_dupe_cols=False is not supported yet" - with pytest.raises(ValueError, match=msg): - pd.read_excel(path, sheet_name="test1", header=None, mangle_dupe_cols=False) - def test_swapped_columns(self, path): # Test for issue #5427. write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py index 3f7b1b5dfa19b..5709e7e4027e8 100644 --- a/pandas/tests/io/parser/test_mangle_dupes.py +++ b/pandas/tests/io/parser/test_mangle_dupes.py @@ -14,14 +14,11 @@ @skip_pyarrow [email protected]("kwargs", [{}, {"mangle_dupe_cols": True}]) -def test_basic(all_parsers, kwargs): - # TODO: add test for condition "mangle_dupe_cols=False" - # once it is actually supported (gh-12935) +def test_basic(all_parsers): parser = all_parsers data = "a,a,b,b,b\n1,2,3,4,5" - result = parser.read_csv(StringIO(data), sep=",", **kwargs) + result = parser.read_csv(StringIO(data), sep=",") expected = DataFrame([[1, 2, 3, 4, 5]], columns=["a", "a.1", "b", "b.1", "b.2"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 2f28697daf9e2..e99387be4eee2 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -31,15 +31,6 @@ def python_engine(request): class TestUnsupportedFeatures: - def test_mangle_dupe_cols_false(self): - # see gh-12935 - data = "a b c\n1 2 3" - msg = "is not supported" - - for engine in ("c", "python"): - with pytest.raises(ValueError, match=msg): - read_csv(StringIO(data), engine=engine, mangle_dupe_cols=False) - def test_c_engine(self): # see gh-6607 data = "a b c\n1 2 3"
- [X] xref #47718 - [X] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [X] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [X] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [X] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Addressing #47718 step by step. Here I remove the `mangle_dupe_cols` argument, which was never fully implemented. I remove it directly, instead of going through a deprecation, because I don't think this should break code in practice. For example, the following code: ```python pandas.read_csv(fname, mangle_dupe_cols=False) ``` would raise an exception in all cases, so I don't think showing a warning before raising the exception would be very useful. There could be the case of someone using: ```python pandas.read_csv(fname, mangle_dupe_cols=True) ``` where `mangle_dupe_cols=True` literally does nothing, as it is the default. If people were doing this, it would make sense to show a `FutureWarning`, but I don't see any reason why anyone would do that. So, in practice, I think a deprecation would just overcomplicate things here, and it's better to simply delete it.
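The deduplication behavior that remains after the removal, taken from the test kept in this diff:

```python
from io import StringIO

import pandas as pd

# Duplicate column names are always deduplicated as 'X', 'X.1', ...
df = pd.read_csv(StringIO("a,a,b,b,b\n1,2,3,4,5"))
print(list(df.columns))  # ['a', 'a.1', 'b', 'b.1', 'b.2']
```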
https://api.github.com/repos/pandas-dev/pandas/pulls/48037
2022-08-11T12:22:23Z
2022-08-12T05:56:20Z
null
2022-08-12T05:56:20Z
Doc: Series.rename parameter documentation with Tests
diff --git a/pandas/core/series.py b/pandas/core/series.py index 206fcbe05d006..c96a5c7f15e71 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4859,17 +4859,23 @@ def rename( Parameters ---------- - axis : {0 or 'index'} - Unused. Parameter needed for compatibility with DataFrame. - index : scalar, hashable sequence, dict-like or function, optional + index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. - - **kwargs - Additional keyword arguments passed to the function. Only the - "inplace" keyword is used. + axis : {0 or 'index'} + Unused. Parameter needed for compatibility with DataFrame. + copy : bool, default True + Also copy underlying data. + inplace : bool, default False + Whether to return a new Series. If True the value of copy is ignored. + level : int or level name, default None + In case of MultiIndex, only rename labels in the specified level. + errors : {'ignore', 'raise'}, default 'ignore' + If 'raise', raise `KeyError` when a `dict-like mapper` or + `index` contains labels that are not present in the index being transformed. + If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py index 90c8f775586e6..729c07b8bdde7 100644 --- a/pandas/tests/series/methods/test_rename.py +++ b/pandas/tests/series/methods/test_rename.py @@ -1,4 +1,5 @@ from datetime import datetime +import re import numpy as np import pytest @@ -134,3 +135,18 @@ def test_rename_series_with_multiindex(self): series_expected = Series(np.ones(5), index=index_expected) tm.assert_series_equal(result, series_expected) + + def test_rename_error_arg(self): + # GH 46889 + ser = Series(["foo", "bar"]) + match = re.escape("[2] not found in axis") + with pytest.raises(KeyError, match=match): + ser.rename({2: 9}, errors="raise") + + def test_rename_copy_false(self): + # GH 46889 + ser = Series(["foo", "bar"]) + shallow_copy = ser.rename({1: 9}, copy=False) + ser[0] = "foobar" + assert ser[0] == shallow_copy[0] + assert ser[1] == shallow_copy[9]
- [x] closes #46889 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
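A short sketch of the behavior covered by the tests in this diff:

```python
import pandas as pd

ser = pd.Series(["foo", "bar"])

# copy=False: the renamed Series shares the underlying data with `ser`
shallow = ser.rename({1: 9}, copy=False)

# errors="raise": labels missing from the index raise instead of being ignored
# ser.rename({2: 9}, errors="raise")  # KeyError: [2] not found in axis
```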
https://api.github.com/repos/pandas-dev/pandas/pulls/48036
2022-08-11T11:51:04Z
2022-08-12T18:02:05Z
2022-08-12T18:02:04Z
2022-08-12T18:02:12Z
ENH: raise_assert_detail only shows the difference between the columns
diff --git a/pandas/_libs/testing.pyi b/pandas/_libs/testing.pyi index 01da496975f51..835cbf11bab9b 100644 --- a/pandas/_libs/testing.pyi +++ b/pandas/_libs/testing.pyi @@ -9,4 +9,5 @@ def assert_almost_equal( lobj=..., robj=..., index_values=..., + show_diff_only=..., ): ... diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx index cfe9f40f12452..5fd29483620a6 100644 --- a/pandas/_libs/testing.pyx +++ b/pandas/_libs/testing.pyx @@ -52,7 +52,8 @@ cpdef assert_dict_equal(a, b, bint compare_keys=True): cpdef assert_almost_equal(a, b, rtol=1.e-5, atol=1.e-8, bint check_dtype=True, - obj=None, lobj=None, robj=None, index_values=None): + obj=None, lobj=None, robj=None, index_values=None, + show_diff_only=False): """ Check that left and right objects are almost equal. @@ -164,7 +165,7 @@ cpdef assert_almost_equal(a, b, from pandas._testing import raise_assert_detail msg = (f"{obj} values are different " f"({np.round(diff * 100.0 / na, 5)} %)") - raise_assert_detail(obj, msg, lobj, robj, index_values=index_values) + raise_assert_detail(obj, msg, lobj, robj, index_values=index_values,show_diff_only=show_diff_only) return True diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index 369e4b3454b65..bebb6fcb4cb12 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -68,6 +68,7 @@ def assert_almost_equal( check_less_precise: bool | int | NoDefault = no_default, rtol: float = 1.0e-5, atol: float = 1.0e-8, + show_diff_only=False, **kwargs, ) -> None: """ @@ -136,6 +137,7 @@ def assert_almost_equal( check_dtype=check_dtype, rtol=rtol, atol=atol, + show_diff_only=show_diff_only, **kwargs, ) @@ -147,6 +149,7 @@ def assert_almost_equal( check_dtype=check_dtype, rtol=rtol, atol=atol, + show_diff_only=show_diff_only, **kwargs, ) @@ -168,7 +171,13 @@ def assert_almost_equal( # if we have "equiv", this becomes True _testing.assert_almost_equal( - left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs + left, + right, + check_dtype=bool(check_dtype), + rtol=rtol, + atol=atol, + show_diff_only=show_diff_only, + **kwargs, ) @@ -639,40 +648,57 @@ def assert_timedelta_array_equal( assert_attr_equal("freq", left, right, obj=obj) -def raise_assert_detail(obj, message, left, right, diff=None, index_values=None): +def raise_assert_detail( + obj, message, left, right, diff=None, index_values=None, show_diff_only=False +): __tracebackhide__ = True msg = f"""{obj} are different {message}""" - if isinstance(index_values, np.ndarray): - msg += f"\n[index]: {pprint_thing(index_values)}" + if show_diff_only: + if not isinstance(left, Series): + left = Series(left).rename("left") + else: + left.rename(f"{left.name}_left") + if not isinstance(right, Series): + right = Series(right).rename("right") + else: + right.rename(f"{right.name}_right") - if isinstance(left, np.ndarray): - left = pprint_thing(left) - elif ( - isinstance(left, CategoricalDtype) - or isinstance(left, PandasDtype) - or isinstance(left, StringDtype) - ): - left = repr(left) - - if isinstance(right, np.ndarray): - right = pprint_thing(right) - elif ( - isinstance(right, CategoricalDtype) - or isinstance(right, PandasDtype) - or isinstance(right, StringDtype) - ): - right = repr(right) + full = pd.concat([left, right], axis=1).set_index(index_values) + + msg += "\n" + repr(full[full.iloc[:, 0] != full.iloc[:, 1]]) - msg += f""" + else: + if isinstance(index_values, np.ndarray): + msg += f"\n[index]: {pprint_thing(index_values)}" + + if isinstance(left, np.ndarray): + left = 
pprint_thing(left) + elif ( + isinstance(left, CategoricalDtype) + or isinstance(left, PandasDtype) + or isinstance(left, StringDtype) + ): + left = repr(left) + + if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif ( + isinstance(right, CategoricalDtype) + or isinstance(right, PandasDtype) + or isinstance(right, StringDtype) + ): + right = repr(right) + + msg += f""" [left]: {left} [right]: {right}""" - if diff is not None: - msg += f"\n[diff]: {diff}" + if diff is not None: + msg += f"\n[diff]: {diff}" raise AssertionError(msg) @@ -686,6 +712,7 @@ def assert_numpy_array_equal( check_same=None, obj="numpy array", index_values=None, + show_diff_only=False, ) -> None: """ Check that 'np.ndarray' is equivalent. @@ -744,7 +771,14 @@ def _raise(left, right, err_msg): diff = diff * 100.0 / left.size msg = f"{obj} values are different ({np.round(diff, 5)} %)" - raise_assert_detail(obj, msg, left, right, index_values=index_values) + raise_assert_detail( + obj, + msg, + left, + right, + index_values=index_values, + show_diff_only=show_diff_only, + ) raise AssertionError(err_msg) @@ -766,6 +800,7 @@ def assert_extension_array_equal( check_exact=False, rtol: float = 1.0e-5, atol: float = 1.0e-8, + show_diff_only=False, ) -> None: """ Check that left and right ExtensionArrays are equal. @@ -840,14 +875,22 @@ def assert_extension_array_equal( left_na = np.asarray(left.isna()) right_na = np.asarray(right.isna()) assert_numpy_array_equal( - left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values + left_na, + right_na, + obj="ExtensionArray NA mask", + index_values=index_values, + show_diff_only=show_diff_only, ) left_valid = left[~left_na].to_numpy(dtype=object) right_valid = right[~right_na].to_numpy(dtype=object) if check_exact: assert_numpy_array_equal( - left_valid, right_valid, obj="ExtensionArray", index_values=index_values + left_valid, + right_valid, + obj="ExtensionArray", + index_values=index_values, + show_diff_only=show_diff_only, ) else: _testing.assert_almost_equal( @@ -858,6 +901,7 @@ def assert_extension_array_equal( atol=atol, obj="ExtensionArray", index_values=index_values, + show_diff_only=show_diff_only, ) @@ -882,6 +926,7 @@ def assert_series_equal( *, check_index=True, check_like=False, + show_diff_only=False, ) -> None: """ Check that left and right Series are equal. 
@@ -1038,6 +1083,7 @@ def assert_series_equal( right_values, check_dtype=check_dtype, index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) else: assert_numpy_array_equal( @@ -1046,6 +1092,7 @@ def assert_series_equal( check_dtype=check_dtype, obj=str(obj), index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) elif check_datetimelike_compat and ( needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype) @@ -1075,6 +1122,7 @@ def assert_series_equal( check_dtype=bool(check_dtype), obj=str(obj), index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype): assert_extension_array_equal( @@ -1084,6 +1132,7 @@ def assert_series_equal( atol=atol, check_dtype=check_dtype, index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) elif is_extension_array_dtype_and_needs_i8_conversion( left.dtype, right.dtype @@ -1093,6 +1142,7 @@ def assert_series_equal( right._values, check_dtype=check_dtype, index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype): # DatetimeArray or TimedeltaArray @@ -1101,6 +1151,7 @@ def assert_series_equal( right._values, check_dtype=check_dtype, index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) else: _testing.assert_almost_equal( @@ -1111,6 +1162,7 @@ def assert_series_equal( check_dtype=bool(check_dtype), obj=str(obj), index_values=np.asarray(left.index), + show_diff_only=show_diff_only, ) # metadata comparison @@ -1149,6 +1201,7 @@ def assert_frame_equal( rtol=1.0e-5, atol=1.0e-8, obj="DataFrame", + show_diff_only=False, ) -> None: """ Check that left and right DataFrame are equal. @@ -1323,7 +1376,11 @@ def assert_frame_equal( assert dtype in lblocks assert dtype in rblocks assert_frame_equal( - lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj + lblocks[dtype], + rblocks[dtype], + check_dtype=check_dtype, + obj=obj, + show_diff_only=show_diff_only, ) # compare by columns @@ -1353,6 +1410,7 @@ def assert_frame_equal( atol=atol, check_index=False, check_flags=False, + show_diff_only=show_diff_only, ) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index c3c5f2fdc9d29..9e9a48729b8f6 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -1,3 +1,4 @@ +from numpy import ones import pytest import pandas as pd @@ -264,6 +265,23 @@ def test_assert_frame_equal_interval_dtype_mismatch(): tm.assert_frame_equal(left, right, check_dtype=True) +def test_assert_frame_equal_show_diff_only(): + msg = ( + 'DataFrame.iloc\\[:, 0\\] \\(column name="a"\\) are different\n\n' + 'DataFrame.iloc\\[:, 0\\] \\(column name="a"\\) ' + "values are different \\(0.27397 \\%\\)\n" + " left right\n364 1.0 0.0" + ) + df1 = DataFrame( + ones((365, 3)), + columns=["a", "b", "c"], + ) + df2 = df1.copy(deep=True) + df2.iloc[-1, 0] = 0 + with pytest.raises(AssertionError, match=msg): + tm.assert_frame_equal(df1, df2, show_diff_only=True) + + @pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): # https://github.com/pandas-dev/pandas/issues/35715
- [x] closes #47910
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

The change is super small, so I figured it didn't really make sense to add it to the `whatsnew` section, but if I should still do this, please let me know. I didn't receive feedback on whether or not it was okay to make this the default behavior, but doing so minimizes the code impact and is a reasonable default in my opinion, so I'm submitting it like this. If this needs to be changed, feel free to let me know.
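For context, a minimal sketch of how the proposed flag would be used, mirroring the test added in the diff above. Note that `show_diff_only` exists only on this (unmerged) branch, not in released pandas:

```python
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm

df1 = pd.DataFrame(np.ones((365, 3)), columns=["a", "b", "c"])
df2 = df1.copy(deep=True)
df2.iloc[-1, 0] = 0  # one differing cell out of 365 rows

# With show_diff_only=True the AssertionError lists only the mismatching
# rows side by side, instead of printing both full columns.
with pytest.raises(AssertionError) as exc_info:
    tm.assert_frame_equal(df1, df2, show_diff_only=True)
print(exc_info.value)
```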
https://api.github.com/repos/pandas-dev/pandas/pulls/48033
2022-08-11T06:25:09Z
2022-08-15T05:31:28Z
null
2022-08-15T05:31:28Z
TST: catch some test warnings
diff --git a/pandas/tests/apply/test_frame_apply.py b/pandas/tests/apply/test_frame_apply.py index 1ef0865fff552..ff3abaf819206 100644 --- a/pandas/tests/apply/test_frame_apply.py +++ b/pandas/tests/apply/test_frame_apply.py @@ -1583,7 +1583,7 @@ def test_apply_on_empty_dataframe(): # GH 39111 df = DataFrame({"a": [1, 2], "b": [3, 0]}) result = df.head(0).apply(lambda x: max(x["a"], x["b"]), axis=1) - expected = Series([]) + expected = Series([], dtype=np.float64) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index a30fb13dbb966..ac12b513aad4e 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -152,14 +152,16 @@ def test_fillna_consistency(self): tm.assert_series_equal(result, expected) # where (we ignore the errors=) - result = ser.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) + with tm.assert_produces_warning(FutureWarning, match="the 'errors' keyword"): + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) tm.assert_series_equal(result, expected) - result = ser.where( - [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" - ) + with tm.assert_produces_warning(FutureWarning, match="the 'errors' keyword"): + result = ser.where( + [True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore" + ) tm.assert_series_equal(result, expected) # with a non-datetime diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py index 624496ea26a81..8e1384db024b0 100644 --- a/pandas/tests/series/test_ufunc.py +++ b/pandas/tests/series/test_ufunc.py @@ -434,11 +434,11 @@ def __repr__(self) -> str: def test_outer(): # https://github.com/pandas-dev/pandas/issues/27186 - s = pd.Series([1, 2, 3]) - o = np.array([1, 2, 3]) + ser = pd.Series([1, 2, 3]) + obj = np.array([1, 2, 3]) with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN): - np.subtract.outer(s, o) + np.subtract.outer(ser, obj) def test_np_matmul(): @@ -446,7 +446,10 @@ def test_np_matmul(): df1 = pd.DataFrame(data=[[-1, 1, 10]]) df2 = pd.DataFrame(data=[-1, 1, 10]) expected_result = pd.DataFrame(data=[102]) + + with tm.assert_produces_warning(FutureWarning, match="on non-aligned"): + result = np.matmul(df1, df2) tm.assert_frame_equal( expected_result, - np.matmul(df1, df2), + result, ) diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 963af81bcb6a5..f8600956aa2f9 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -184,10 +184,12 @@ def test_series_equal_index_mismatch(check_index): def test_series_invalid_param_combination(): + left = Series(dtype=object) + right = Series(dtype=object) with pytest.raises( ValueError, match="check_like must be False if check_index is False" ): - tm.assert_series_equal(Series(), Series(), check_index=False, check_like=True) + tm.assert_series_equal(left, right, check_index=False, check_like=True) def test_series_equal_length_mismatch(rtol): diff --git a/pandas/tests/window/moments/test_moments_consistency_ewm.py b/pandas/tests/window/moments/test_moments_consistency_ewm.py index f9f09bffb14b1..aaa1010a4798b 100644 --- a/pandas/tests/window/moments/test_moments_consistency_ewm.py +++ b/pandas/tests/window/moments/test_moments_consistency_ewm.py @@ -220,9 +220,10 @@ def 
test_ewm_consistency_series_cov_corr( # check that corr(x, y) == cov(x, y) / (std(x) * # std(y)) - corr_x_y = series_data.ewm( - com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na - ).corr(series_data, bias=bias) + with tm.assert_produces_warning(FutureWarning, match="Passing additional kwargs"): + corr_x_y = series_data.ewm( + com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na + ).corr(series_data, bias=bias) std_x = series_data.ewm( com=com, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na ).std(bias=bias)
I get a hundred and change warnings locally when I run the test suite; I'm surprised the npdev build isn't failing on these. This PR catches a handful of them.
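The recurring pattern in the diff is worth calling out: calls that legitimately emit a deprecation warning are wrapped in `tm.assert_produces_warning`, which both keeps the suite quiet and asserts on the warning message itself. A sketch using the `np.matmul` case from the diff (the `FutureWarning` is only emitted on the pandas version this PR targets):

```python
import numpy as np

import pandas as pd
import pandas._testing as tm

df1 = pd.DataFrame(data=[[-1, 1, 10]])
df2 = pd.DataFrame(data=[-1, 1, 10])

# The context manager fails the test if the warning is *not* raised, so the
# deprecation itself is under test rather than leaking into the logs.
with tm.assert_produces_warning(FutureWarning, match="on non-aligned"):
    result = np.matmul(df1, df2)  # (1x3) @ (3x1) -> [[102]]

tm.assert_frame_equal(pd.DataFrame(data=[102]), result)
```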
https://api.github.com/repos/pandas-dev/pandas/pulls/48031
2022-08-11T01:06:09Z
2022-08-11T16:57:49Z
2022-08-11T16:57:49Z
2022-08-11T19:04:57Z
BUG: use uuid4 instead of md5 for fake column in cross merge
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a0d33cb513722..d794692de5005 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1095,6 +1095,7 @@ Reshaping - Bug in :func:`concat` not sorting the column names when ``None`` is included (:issue:`47331`) - Bug in :func:`concat` with identical key leads to error when indexing :class:`MultiIndex` (:issue:`46519`) - Bug in :func:`pivot_table` raising ``TypeError`` when ``dropna=True`` and aggregation column has extension array dtype (:issue:`47477`) +- Bug in :func:`merge` raising error for ``how="cross"`` when using ``FIPS`` mode in ssl library (:issue:`48024`) - Bug in :meth:`DataFrame.join` with a list when using suffixes to join DataFrames with duplicate column names (:issue:`46396`) - Bug in :meth:`DataFrame.pivot_table` with ``sort=False`` results in sorted index (:issue:`17041`) - Bug in :meth:`concat` when ``axis=1`` and ``sort=False`` where the resulting Index was a :class:`Int64Index` instead of a :class:`RangeIndex` (:issue:`46675`) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 77a0d34132da0..4b8547bd6a232 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -6,13 +6,13 @@ import copy import datetime from functools import partial -import hashlib import string from typing import ( TYPE_CHECKING, Hashable, cast, ) +import uuid import warnings import numpy as np @@ -1311,7 +1311,7 @@ def _create_cross_configuration( DataFrames with cross_col, the merge operation set to inner and the column to join over. """ - cross_col = f"_cross_{hashlib.md5().hexdigest()}" + cross_col = f"_cross_{uuid.uuid4()}" how = "inner" return ( left.assign(**{cross_col: 1}),
- [x] closes #48024
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

cc @mroeschke Any idea how to test this? I did not add a test for this; do you think that's necessary?
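A quick illustration of why the swap matters: under an OpenSSL built in FIPS mode, constructing `hashlib.md5()` raises, while `uuid.uuid4()` involves no hash at all. The placeholder column name only needs to be unlikely to collide with real column names:

```python
import uuid

# New behavior from this PR: a random, per-call unique placeholder name.
cross_col = f"_cross_{uuid.uuid4()}"
print(cross_col)  # e.g. '_cross_1f0e6a2c-...' (different on every call)

# Old behavior, for comparison:
#     import hashlib
#     cross_col = f"_cross_{hashlib.md5().hexdigest()}"
# Besides raising under FIPS mode, md5() of empty input is a *constant*
# ('d41d8cd98f00b204e9800998ecf8427e'), so the old name was the same every time.
```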
https://api.github.com/repos/pandas-dev/pandas/pulls/48029
2022-08-10T21:40:08Z
2022-08-11T00:55:24Z
2022-08-11T00:55:24Z
2022-08-11T10:30:51Z
ENH: Support masks in groupby prod
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index c197f3df45814..6522a8008aee6 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1112,7 +1112,7 @@ Groupby/resample/rolling - Bug when using ``engine="numba"`` would return the same jitted function when modifying ``engine_kwargs`` (:issue:`46086`) - Bug in :meth:`.DataFrameGroupBy.transform` fails when ``axis=1`` and ``func`` is ``"first"`` or ``"last"`` (:issue:`45986`) - Bug in :meth:`DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`) -- Bug in :meth:`.GroupBy.sum` and :meth:`.GroupBy.cumsum` with integer dtypes losing precision (:issue:`37493`) +- Bug in :meth:`.GroupBy.sum`, :meth:`.GroupBy.prod` and :meth:`.GroupBy.cumsum` with integer dtypes losing precision (:issue:`37493`) - Bug in :meth:`.GroupBy.cumsum` with ``timedelta64[ns]`` dtype failing to recognize ``NaT`` as a null value (:issue:`46216`) - Bug in :meth:`.GroupBy.cumsum` with integer dtypes causing overflows when sum was bigger than maximum of dtype (:issue:`37493`) - Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable dtypes incorrectly altering the original data in place (:issue:`46220`) diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index c8e9df6cd6b38..04db0c9b90bc5 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -63,10 +63,12 @@ def group_sum( is_datetimelike: bool = ..., ) -> None: ... def group_prod( - out: np.ndarray, # floating[:, ::1] + out: np.ndarray, # int64float_t[:, ::1] counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[floating, ndim=2] + values: np.ndarray, # ndarray[int64float_t, ndim=2] labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., min_count: int = ..., ) -> None: ... 
def group_var( diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 563abf949dbbc..299dfdf177d91 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -682,10 +682,12 @@ def group_sum( @cython.wraparound(False) @cython.boundscheck(False) def group_prod( - floating[:, ::1] out, + int64float_t[:, ::1] out, int64_t[::1] counts, - ndarray[floating, ndim=2] values, + ndarray[int64float_t, ndim=2] values, const intp_t[::1] labels, + const uint8_t[:, ::1] mask, + uint8_t[:, ::1] result_mask=None, Py_ssize_t min_count=0, ) -> None: """ @@ -693,10 +695,11 @@ def group_prod( """ cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - floating val, count - floating[:, ::1] prodx + int64float_t val, count + int64float_t[:, ::1] prodx int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) + bint isna_entry, uses_mask = mask is not None if len_values != len_labels: raise ValueError("len(index) != len(labels)") @@ -716,15 +719,32 @@ def group_prod( for j in range(K): val = values[i, j] - # not nan - if val == val: + if uses_mask: + isna_entry = mask[i, j] + elif int64float_t is float32_t or int64float_t is float64_t: + isna_entry = not val == val + else: + isna_entry = False + + if not isna_entry: nobs[lab, j] += 1 prodx[lab, j] *= val for i in range(ncounts): for j in range(K): if nobs[i, j] < min_count: - out[i, j] = NAN + + # else case is not possible + if uses_mask: + result_mask[i, j] = True + # Be deterministic, out was initialized as empty + out[i, j] = 0 + elif int64float_t is float32_t or int64float_t is float64_t: + out[i, j] = NAN + else: + # we only get here when < mincount which gets handled later + pass + else: out[i, j] = prodx[i, j] diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 540825b33c073..418a222a0bfa6 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -159,6 +159,7 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: "sum", "ohlc", "cumsum", + "prod", } _cython_arity = {"ohlc": 4} # OHLC @@ -221,13 +222,13 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: values = ensure_float64(values) elif values.dtype.kind in ["i", "u"]: - if how in ["var", "prod", "mean"] or ( + if how in ["var", "mean"] or ( self.kind == "transform" and self.has_dropped_na ): # result may still include NaN, so we have to cast values = ensure_float64(values) - elif how in ["sum", "ohlc", "cumsum"]: + elif how in ["sum", "ohlc", "prod", "cumsum"]: # Avoid overflow during group op if values.dtype.kind == "i": values = ensure_int64(values) @@ -597,8 +598,16 @@ def _call_cython_op( min_count=min_count, is_datetimelike=is_datetimelike, ) - elif self.how == "ohlc": - func(result, counts, values, comp_ids, min_count, mask, result_mask) + elif self.how in ["ohlc", "prod"]: + func( + result, + counts, + values, + comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + ) else: func(result, counts, values, comp_ids, min_count, **kwargs) else: @@ -631,8 +640,8 @@ def _call_cython_op( # need to have the result set to np.nan, which may require casting, # see GH#40767 if is_integer_dtype(result.dtype) and not is_datetimelike: - # Neutral value for sum is 0, so don't fill empty groups with nan - cutoff = max(0 if self.how == "sum" else 1, min_count) + # if the op keeps the int dtypes, we have to use 0 + cutoff = max(0 if self.how in ["sum", "prod"] else 1, min_count) empty_groups = counts < cutoff if empty_groups.any(): if result_mask is not None and 
self.uses_mask(): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 1af94434ca1fa..ba39f76203623 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2847,8 +2847,8 @@ def test_single_element_list_grouping(): values, _ = next(iter(df.groupby(["a"]))) [email protected]("func", ["sum", "cumsum"]) -def test_groupby_sum_avoid_casting_to_float(func): [email protected]("func", ["sum", "cumsum", "prod"]) +def test_groupby_avoid_casting_to_float(func): # GH#37493 val = 922337203685477580 df = DataFrame({"a": 1, "b": [val]}) @@ -2859,12 +2859,13 @@ def test_groupby_sum_avoid_casting_to_float(func): tm.assert_frame_equal(result, expected) -def test_groupby_sum_support_mask(any_numeric_ea_dtype): [email protected]("func, val", [("sum", 3), ("prod", 2)]) +def test_groupby_sum_support_mask(any_numeric_ea_dtype, func, val): # GH#37493 df = DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype=any_numeric_ea_dtype) - result = df.groupby("a").sum() + result = getattr(df.groupby("a"), func)() expected = DataFrame( - {"b": [3]}, + {"b": [val]}, index=Index([1], name="a", dtype=any_numeric_ea_dtype), dtype=any_numeric_ea_dtype, ) @@ -2887,6 +2888,14 @@ def test_groupby_overflow(val, dtype): expected = DataFrame({"b": [val, val * 2]}, dtype=f"{dtype}64") tm.assert_frame_equal(result, expected) + result = df.groupby("a").prod() + expected = DataFrame( + {"b": [val * val]}, + index=Index([1], name="a", dtype=f"{dtype}64"), + dtype=f"{dtype}64", + ) + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("skipna, val", [(True, 3), (False, pd.NA)]) def test_groupby_cumsum_mask(any_numeric_ea_dtype, skipna, val):
- [x] xref #37493
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

cc @jorisvandenbossche This will have conflicts with #48018; I will resolve them after this is merged. It also gives us the opportunity to simplify tests.
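Based on the tests added in the diff, the user-visible effect is that with the masked `group_prod` kernel, `prod` on a nullable integer column keeps its extension dtype (and full int64 precision) instead of round-tripping through float64. A small usage sketch:

```python
import pandas as pd

# Nullable (masked) integer column, as in the tests above.
df = pd.DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype="Int64")

result = df.groupby("a").prod()
print(result["b"].dtype)  # Int64 with this change (previously float64)
print(result)             # pd.NA is skipped, so the product is 1 * 2 == 2

# Precision is also preserved: a value near the int64 max no longer loses
# digits by being cast to float64 on the way through the groupby kernel.
val = 922_337_203_685_477_580
big = pd.DataFrame({"a": 1, "b": [val]})
assert big.groupby("a").prod()["b"].iloc[0] == val
```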
https://api.github.com/repos/pandas-dev/pandas/pulls/48027
2022-08-10T21:05:14Z
2022-08-29T21:44:03Z
2022-08-29T21:44:03Z
2022-08-29T21:46:21Z
DEPR fixup warning message of MultiIndex.lexsort_depth deprecation
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a0d33cb513722..f51f31af4799a 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -843,6 +843,7 @@ Other Deprecations - Deprecated setting a categorical's categories with ``cat.categories = ['a', 'b', 'c']``, use :meth:`Categorical.rename_categories` instead (:issue:`37643`) - Deprecated unused arguments ``encoding`` and ``verbose`` in :meth:`Series.to_excel` and :meth:`DataFrame.to_excel` (:issue:`47912`) - Deprecated producing a single element when iterating over a :class:`DataFrameGroupBy` or a :class:`SeriesGroupBy` that has been grouped by a list of length 1; A tuple of length one will be returned instead (:issue:`42795`) +- Fixed up warning message of deprecation of :meth:`MultiIndex.lesort_depth` as public method, as the message previously referred to :meth:`MultiIndex.is_lexsorted` instead (:issue:`38701`) .. --------------------------------------------------------------------------- .. _whatsnew_150.performance: diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5a9b1e6943608..493d03ed1a86c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1910,7 +1910,7 @@ def _is_lexsorted(self) -> bool: @property def lexsort_depth(self) -> int: warnings.warn( - "MultiIndex.is_lexsorted is deprecated as a public function, " + "MultiIndex.lexsort_depth is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, stacklevel=find_stack_level(), diff --git a/pandas/tests/indexes/multi/test_lexsort.py b/pandas/tests/indexes/multi/test_lexsort.py index c37172ad7a980..0aadbdb5c32da 100644 --- a/pandas/tests/indexes/multi/test_lexsort.py +++ b/pandas/tests/indexes/multi/test_lexsort.py @@ -24,7 +24,10 @@ def test_is_lexsorted(self): def test_is_lexsorted_deprecation(self): # GH 32259 - with tm.assert_produces_warning(): + with tm.assert_produces_warning( + FutureWarning, + match="MultiIndex.is_lexsorted is deprecated as a public function", + ): MultiIndex.from_arrays([["a", "b", "c"], ["d", "f", "e"]]).is_lexsorted() @@ -53,5 +56,8 @@ def test_lexsort_depth(self): def test_lexsort_depth_deprecation(self): # GH 32259 - with tm.assert_produces_warning(): + with tm.assert_produces_warning( + FutureWarning, + match="MultiIndex.lexsort_depth is deprecated as a public function", + ): MultiIndex.from_arrays([["a", "b", "c"], ["d", "f", "e"]]).lexsort_depth
xref https://github.com/pandas-dev/pandas/pull/38701/files#r942749562
https://api.github.com/repos/pandas-dev/pandas/pulls/48025
2022-08-10T18:49:20Z
2022-08-12T11:38:52Z
2022-08-12T11:38:52Z
2022-08-12T11:40:47Z
PERF cache find_stack_level
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index ec1dbff6903e7..bcd85f915e4a2 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -1,3 +1,4 @@ +import inspect import numbers from operator import ( le, @@ -45,6 +46,7 @@ cnp.import_array() import warnings from pandas._libs import lib + from pandas._libs cimport util from pandas._libs.hashtable cimport Int64Vector from pandas._libs.tslibs.timedeltas cimport _Timedelta @@ -394,7 +396,7 @@ cdef class Interval(IntervalMixin): warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.inclusive diff --git a/pandas/_testing/asserters.py b/pandas/_testing/asserters.py index c7924dc451752..369e4b3454b65 100644 --- a/pandas/_testing/asserters.py +++ b/pandas/_testing/asserters.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect from typing import ( Literal, cast, @@ -112,7 +113,7 @@ def assert_almost_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -339,7 +340,7 @@ def _get_ilevel_values(index, level): "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -815,7 +816,7 @@ def assert_extension_array_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -970,7 +971,7 @@ def assert_series_equal( "is deprecated and will be removed in a future version. " "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) @@ -1263,7 +1264,7 @@ def assert_frame_equal( "is deprecated and will be removed in a future version. 
" "You can stop passing 'check_less_precise' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) rtol = atol = _get_tol_from_less_precise(check_less_precise) diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 07fa5799fe371..b66a91826b689 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -6,6 +6,7 @@ """ from __future__ import annotations +import inspect import warnings from pandas.util._decorators import doc @@ -268,7 +269,7 @@ def decorator(accessor): f"{repr(name)} for type {repr(cls)} is overriding a preexisting " f"attribute with the same name.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) setattr(cls, name, CachedAccessor(name, accessor)) cls._accessors.add(name) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 159c0bb2e72c0..a4736c2a141a5 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -836,7 +836,9 @@ def resolve_na_sentinel( "Specify `use_na_sentinel=True` to use the sentinel value -1, and " "`use_na_sentinel=False` to encode NaN values." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) result = na_sentinel return result @@ -1658,7 +1660,7 @@ def diff(arr, n: int, axis: int = 0): "dtype lost in 'diff()'. In the future this will raise a " "TypeError. Convert to a suitable dtype prior to calling 'diff'.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) arr = np.asarray(arr) dtype = arr.dtype diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 18a0f9b7aa2ce..7a7050ea8bad7 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -291,7 +291,7 @@ def transform_dict_like(self, func): f"raised, this will raise in a future version of pandas. " f"Drop these columns/ops to avoid this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return concat(results, axis=1) @@ -423,7 +423,7 @@ def agg_list_like(self) -> DataFrame | Series: warnings.warn( depr_nuisance_columns_msg.format(failed_names), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) try: diff --git a/pandas/core/arraylike.py b/pandas/core/arraylike.py index 280a599de84ed..4e8e4ea7e8d87 100644 --- a/pandas/core/arraylike.py +++ b/pandas/core/arraylike.py @@ -6,6 +6,7 @@ """ from __future__ import annotations +import inspect import operator from typing import Any import warnings @@ -220,7 +221,7 @@ def _maybe_fallback(ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): "or align manually (eg 'df1, df2 = df1.align(df2)') before passing to " "the ufunc to obtain the future behaviour and silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # keep the first dataframe of the inputs, other DataFrame/Series is @@ -348,7 +349,9 @@ def _reconstruct(result): "to an array with '.to_numpy()' first." 
) warnings.warn( - msg.format(ufunc), FutureWarning, stacklevel=find_stack_level() + msg.format(ufunc), + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) return result raise NotImplementedError diff --git a/pandas/core/arrays/arrow/_arrow_utils.py b/pandas/core/arrays/arrow/_arrow_utils.py index c9666de9f892d..81ba04a4e1426 100644 --- a/pandas/core/arrays/arrow/_arrow_utils.py +++ b/pandas/core/arrays/arrow/_arrow_utils.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import json import warnings @@ -22,7 +23,9 @@ def fallback_performancewarning(version: str | None = None) -> None: msg = "Falling back on a non-pyarrow code path which may decrease performance." if version is not None: msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning." - warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, PerformanceWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) def pyarrow_array_to_numpy_and_mask( @@ -133,7 +136,7 @@ def closed(self) -> IntervalInclusiveType: warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._inclusive diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 6c9b7adadb7b0..f268c24ca766d 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -476,7 +476,7 @@ def __init_subclass__(cls, **kwargs) -> None: f"instead. Add this argument to `{name}.factorize` to be compatible " f"with future versions of pandas and silence this warning.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) def to_numpy( diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 127814dc58f4c..b50ddd42997cb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2,6 +2,7 @@ from csv import QUOTE_NONNUMERIC from functools import partial +import inspect import operator from shutil import get_terminal_size from typing import ( @@ -394,7 +395,7 @@ def __init__( "Allowing scalars in the Categorical constructor is deprecated " "and will raise in a future version. Use `[value]` instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) values = [values] @@ -749,7 +750,7 @@ def categories(self, categories) -> None: "Setting categories in-place is deprecated and will raise in a " "future version. Use rename_categories instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self._set_categories(categories) @@ -873,7 +874,7 @@ def set_ordered( "a future version. setting ordered-ness on categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1125,7 +1126,7 @@ def rename_categories( "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1189,7 +1190,7 @@ def reorder_categories(self, new_categories, ordered=None, inplace=no_default): "a future version. 
Reordering categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1273,7 +1274,7 @@ def add_categories( "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1349,7 +1350,7 @@ def remove_categories(self, removals, inplace=no_default): "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1437,7 +1438,7 @@ def remove_unused_categories( "remove_unused_categories is deprecated and " "will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -2046,7 +2047,7 @@ def to_dense(self) -> np.ndarray: "Categorical.to_dense is deprecated and will be removed in " "a future version. Use np.asarray(cat) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return np.asarray(self) @@ -2063,7 +2064,7 @@ def _codes(self, value: np.ndarray): "Setting the codes on a Categorical is deprecated and will raise in " "a future version. Create a new Categorical object instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # GH#40606 NDArrayBacked.__init__(self, value, self.dtype) @@ -2088,7 +2089,7 @@ def take_nd( warn( "Categorical.take_nd is deprecated, use Categorical.take instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value) @@ -2381,7 +2382,7 @@ def mode(self, dropna: bool = True) -> Categorical: "Categorical.mode is deprecated and will be removed in a future version. " "Use Series.mode instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._mode(dropna=dropna) @@ -2524,7 +2525,7 @@ def is_dtype_equal(self, other) -> bool: "Categorical.is_dtype_equal is deprecated and will be removed " "in a future version", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) try: return self._categories_match_up_to_permutation(other) @@ -2648,7 +2649,7 @@ def replace(self, to_replace, value, inplace: bool = False) -> Categorical | Non "Categorical.replace is deprecated and will be removed in a future " "version. 
Use Series.replace directly instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._replace(to_replace=to_replace, value=value, inplace=inplace) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 11c236836e791..2c070499308a7 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -4,6 +4,7 @@ datetime, timedelta, ) +import inspect import operator from typing import ( TYPE_CHECKING, @@ -469,7 +470,7 @@ def astype(self, dtype, copy: bool = True): "exactly the specified dtype instead of uint64, and will " "raise if that conversion overflows.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) elif (self.asi8 < 0).any(): # GH#45034 @@ -479,7 +480,7 @@ def astype(self, dtype, copy: bool = True): "raise if the conversion overflows, as it did in this " "case with negative int64 values.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) elif dtype != np.int64: # GH#45034 @@ -489,7 +490,7 @@ def astype(self, dtype, copy: bool = True): "exactly the specified dtype instead of int64, and will " "raise if that conversion overflows.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if copy: @@ -628,7 +629,7 @@ def _validate_shift_value(self, fill_value): FutureWarning, # There is no way to hard-code the level since this might be # reached directly or called from the Index or Block method - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) fill_value = new_fill diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index ffd093b86582c..58dee30288be9 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -6,6 +6,7 @@ timedelta, tzinfo, ) +import inspect from typing import ( TYPE_CHECKING, Literal, @@ -473,7 +474,7 @@ def _check_compatible_with(self, other, setitem: bool = False): "timezone. To retain the old behavior, explicitly cast to " "object dtype before the operation.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'") @@ -1139,7 +1140,7 @@ def to_perioddelta(self, freq) -> TimedeltaArray: "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.", FutureWarning, # stacklevel chosen to be correct for when called from DatetimeIndex - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.arrays.timedeltas import TimedeltaArray @@ -1341,7 +1342,7 @@ def weekofyear(self): "weekofyear and return an Index, you may call " "pd.Int64Index(idx.isocalendar().week)", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) week_series = self.isocalendar().week if week_series.hasnans: @@ -2238,7 +2239,7 @@ def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None = None): "before passing the data to pandas. 
To get the future behavior, " "first cast to 'int64'.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) elif is_timedelta64_dtype(data.dtype) or is_bool_dtype(data.dtype): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index e7198a95c07f1..bd765b4601b01 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import operator from operator import ( le, @@ -1380,7 +1381,7 @@ def closed(self) -> IntervalInclusiveType: warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.dtype.inclusive @@ -1432,7 +1433,7 @@ def set_closed( "set_closed is deprecated and will be removed in a future version. " "Use set_inclusive instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.set_inclusive(closed) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index f946f881311c1..e9302efdce2e7 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -4,6 +4,7 @@ from __future__ import annotations from collections import abc +import inspect import numbers import operator from typing import ( @@ -414,7 +415,7 @@ def __init__( "to construct an array with the desired repeats of the " "scalar value instead.\n\n", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if index is not None and not is_scalar(data): @@ -493,7 +494,7 @@ def __init__( "loses timezone information. Cast to object before " "sparse to retain timezone information.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) data = np.asarray(data, dtype="datetime64[ns]") if fill_value is NaT: @@ -1182,7 +1183,9 @@ def searchsorted( ) -> npt.NDArray[np.intp] | np.intp: msg = "searchsorted requires high memory usage." - warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, PerformanceWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) if not is_scalar(v): v = np.asarray(v) v = np.asarray(v) @@ -1322,7 +1325,7 @@ def astype(self, dtype: AstypeArg | None = None, copy: bool = True): "array with the requested dtype. To retain the old behavior, use " "`obj.astype(SparseDtype(dtype))`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) dtype = self.dtype.update_dtype(dtype) diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index eaed6257736ba..cbe283b50b4f7 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -1,6 +1,7 @@ """Sparse Dtype""" from __future__ import annotations +import inspect import re from typing import ( TYPE_CHECKING, @@ -409,7 +410,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: f"values: '{fill_values}'. 
Picking the first and " "converting the rest.", PerformanceWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] diff --git a/pandas/core/base.py b/pandas/core/base.py index f7e6c4434da32..e1775628d09ee 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,6 +4,7 @@ from __future__ import annotations +import inspect import textwrap from typing import ( TYPE_CHECKING, @@ -1064,7 +1065,7 @@ def is_monotonic(self) -> bool: "is_monotonic is deprecated and will be removed in a future version. " "Use is_monotonic_increasing instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.is_monotonic_increasing diff --git a/pandas/core/common.py b/pandas/core/common.py index 980e7a79414ba..41ed68e73a4c0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -169,7 +169,7 @@ def cast_scalar_indexer(val, warn_float: bool = False): "Indexing with a float is deprecated, and will raise an IndexError " "in pandas 2.0. You can manually convert to an integer key instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return int(val) return val @@ -697,4 +697,6 @@ def deprecate_numeric_only_default( "this warning." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 2e7a0f842ee6d..958e605727f27 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -7,6 +7,7 @@ partial, wraps, ) +import inspect from typing import ( TYPE_CHECKING, Callable, @@ -131,7 +132,9 @@ def _align_core(terms): f"by more than {ordm:.4g}; performance may suffer." ) warnings.warn( - w, category=PerformanceWarning, stacklevel=find_stack_level() + w, + category=PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) f = partial(ti.reindex, reindexer, axis=axis, copy=False) diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index ea70d0130a119..fa0ef46b850d1 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect import tokenize from typing import TYPE_CHECKING import warnings @@ -311,7 +312,7 @@ def eval( "will be removed in a future version." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) exprs: list[str | BinOp] diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 8c1a3fece255e..6c32cc98df9ac 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -11,6 +11,7 @@ """ from __future__ import annotations +import inspect import os from typing import Callable import warnings @@ -370,7 +371,7 @@ def _deprecate_column_space(key): "in a future version. Use df.to_string(col_space=...) " "instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) cf.register_option("column_space", 12, validator=is_int, cb=_deprecate_column_space) @@ -397,7 +398,7 @@ def _deprecate_negative_int_max_colwidth(key): "will not be supported in future version. 
Instead, use None " "to not limit the column width.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) cf.register_option( diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 4b63d492ec1dd..e1a69086609e9 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -6,6 +6,7 @@ """ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, Any, @@ -568,7 +569,7 @@ def sanitize_array( "passed dtype. To retain the old behavior, call Series(arr) or " "DataFrame(arr) without passing a dtype.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) subarr = np.array(data, copy=copy) except ValueError: @@ -580,7 +581,7 @@ def sanitize_array( "if they cannot be cast losslessly (matching Series behavior). " "To retain the old behavior, use DataFrame(data).astype(dtype)", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # GH#40110 until the deprecation is enforced, we _dont_ # ignore the dtype for DataFrame, and _do_ cast even though @@ -852,7 +853,7 @@ def _try_cast( "passed to 'DataFrame', either all columns will be cast to that " "dtype, or a TypeError will be raised.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) subarr = np.array(arr, dtype=object, copy=copy) return subarr diff --git a/pandas/core/describe.py b/pandas/core/describe.py index c70dbe0b8b0b1..d265a307078b9 100644 --- a/pandas/core/describe.py +++ b/pandas/core/describe.py @@ -9,6 +9,7 @@ ABC, abstractmethod, ) +import inspect from typing import ( TYPE_CHECKING, Any, @@ -373,7 +374,7 @@ def select_describe_func( "version of pandas. Specify `datetime_is_numeric=True` to " "silence this warning and adopt the future behavior now.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return describe_timestamp_as_categorical_1d elif is_timedelta64_dtype(data.dtype): diff --git a/pandas/core/dtypes/astype.py b/pandas/core/dtypes/astype.py index 7fb58468746a8..6d04dd755dbfd 100644 --- a/pandas/core/dtypes/astype.py +++ b/pandas/core/dtypes/astype.py @@ -371,7 +371,7 @@ def astype_dt64_to_dt64tz( "timezone-aware dtype is deprecated and will raise in a " "future version. Use ser.dt.tz_localize instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # GH#33401 this doesn't match DatetimeArray.astype, which @@ -387,7 +387,7 @@ def astype_dt64_to_dt64tz( "timezone-aware dtype is deprecated and will raise in a " "future version. Use obj.tz_localize instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return values.tz_localize(dtype.tz) @@ -407,7 +407,7 @@ def astype_dt64_to_dt64tz( "future version. 
Use obj.tz_localize(None) or " "obj.tz_convert('UTC').tz_localize(None) instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) result = values.tz_convert("UTC").tz_localize(None) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 769656d1c4755..5340bc6b590c4 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -10,6 +10,7 @@ timedelta, ) import functools +import inspect from typing import ( TYPE_CHECKING, Any, @@ -624,7 +625,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan): "dtype is deprecated. In a future version, this will be cast " "to object dtype. Pass `fill_value=Timestamp(date_obj)` instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return dtype, fv elif isinstance(fill_value, str): @@ -1277,7 +1278,7 @@ def try_timedelta(v: np.ndarray) -> np.ndarray: "and will be removed in a future version. To retain the old behavior " f"explicitly pass Series(data, dtype={value.dtype})", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return value @@ -1336,7 +1337,7 @@ def maybe_cast_to_datetime( "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # equiv: dta.view(dtype) # Note: NOT equivalent to dta.astype(dtype) @@ -1376,7 +1377,7 @@ def maybe_cast_to_datetime( ".tz_localize('UTC').tz_convert(dtype.tz) " "or pd.Series(data.view('int64'), dtype=dtype)", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) value = dta.tz_localize("UTC").tz_convert(dtype.tz) @@ -1745,7 +1746,7 @@ def _maybe_unbox_datetimelike_tz_deprecation(value: Scalar, dtype: DtypeObj): "`pd.Series(values).dt.tz_localize(None)` " "instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) new_value = value.tz_localize(None) return _maybe_unbox_datetimelike(new_value, dtype) @@ -1863,7 +1864,7 @@ def maybe_cast_to_integer_array( "In a future version this will raise OverflowError. To retain the " f"old behavior, use pd.Series(values).astype({dtype})", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return casted @@ -1874,7 +1875,7 @@ def maybe_cast_to_integer_array( f"dtype={dtype} is deprecated and will raise in a future version. " "Use values.view(dtype) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return casted diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c10461b2fc7f8..be4d50af8a053 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect from typing import ( Any, Callable, @@ -307,7 +308,7 @@ def is_categorical(arr) -> bool: "is_categorical is deprecated and will be removed in a future version. " "Use is_categorical_dtype instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr) @@ -1386,7 +1387,7 @@ def is_extension_type(arr) -> bool: "'is_extension_type' is deprecated and will be removed in a future " "version. 
Use 'is_extension_array_dtype' instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if is_categorical_dtype(arr): diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 059df4009e2f6..80efe96ae7146 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, cast, @@ -152,7 +153,7 @@ def is_nonempty(x) -> bool: "(instead of coercing bools to numeric values). To retain the old " "behavior, explicitly cast bool-dtype arrays to numeric dtype.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return result diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 99b2082d409a9..c2c600adbbe09 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect import re from typing import ( TYPE_CHECKING, @@ -1194,7 +1195,7 @@ def closed(self): warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._inclusive diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 893e4a9be58ef..78cf76e747d1d 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -3,6 +3,7 @@ from __future__ import annotations from collections import abc +import inspect from numbers import Number import re from typing import Pattern @@ -459,7 +460,7 @@ def is_inferred_bool_dtype(arr: ArrayLike) -> bool: "will not be included in reductions with bool_only=True. " "Explicitly cast to bool dtype instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return result diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 49e5bc24786dd..0a7a6494d04eb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -14,6 +14,7 @@ from collections import abc import datetime import functools +import inspect from io import StringIO import itertools from textwrap import dedent @@ -673,7 +674,7 @@ def __init__( "removed in a future version. Pass " "{name: data[name] for name in data.dtype.names} instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # a masked array @@ -1324,7 +1325,7 @@ def iteritems(self) -> Iterable[tuple[Hashable, Series]]: "iteritems is deprecated and will be removed in a future version. " "Use .items instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) yield from self.items() @@ -1909,7 +1910,7 @@ def to_dict(self, orient: str = "dict", into=dict): warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # GH16122 into_c = com.standardize_mapping(into) @@ -1930,7 +1931,7 @@ def to_dict(self, orient: str = "dict", into=dict): "will be used in a future version. Use one of the above " "to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if orient.startswith("d"): @@ -2773,7 +2774,7 @@ def to_markdown( "'showindex' is deprecated. 
Only 'index' will be used " "in a future version. Use 'index' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) kwargs.setdefault("headers", "keys") @@ -3387,7 +3388,7 @@ def info( warnings.warn( "null_counts is deprecated. Use show_counts instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) show_counts = null_counts info = DataFrameInfo( @@ -3772,7 +3773,7 @@ def _getitem_bool_array(self, key): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) elif len(key) != len(self.index): raise ValueError( @@ -4862,7 +4863,9 @@ def lookup( "You can use DataFrame.melt and DataFrame.loc " "as a substitute." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) n = len(row_labels) if n != len(col_labels): @@ -8364,7 +8367,7 @@ def groupby( "will be removed in a future version." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: squeeze = False @@ -9735,7 +9738,7 @@ def append( "and will be removed from pandas in a future version. " "Use pandas.concat instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._append(other, ignore_index, verify_integrity, sort) @@ -10701,7 +10704,7 @@ def count( "deprecated and will be removed in a future version. Use groupby " "instead. df.count(level=1) should use df.groupby(level=1).count().", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) res = self._count_level(level, axis=axis, numeric_only=numeric_only) return res.__finalize__(self, method="count") @@ -10803,7 +10806,7 @@ def _reduce( "will include datetime64 and datetime64tz columns in a " "future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # Non-copy equivalent to # dt64_cols = self.dtypes.apply(is_datetime64_any_dtype) @@ -10904,7 +10907,7 @@ def _get_data() -> DataFrame: "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if hasattr(result, "dtype"): diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 003fe2571401f..8096b57168d8c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5,6 +5,7 @@ from datetime import timedelta import functools import gc +import inspect import json import operator import pickle @@ -493,7 +494,7 @@ def _AXIS_NUMBERS(self) -> dict[str, int]: warnings.warn( "_AXIS_NUMBERS has been deprecated.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return {"index": 0} @@ -3375,7 +3376,9 @@ def to_latex( "to use `DataFrame.style.to_latex` which also contains additional " "functionality." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) # Get defaults from the pandas config if self.ndim == 1: @@ -3804,7 +3807,7 @@ class max_speed "is_copy is deprecated and will be removed in a future version. 
" "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) nv.validate_take((), kwargs) @@ -3960,7 +3963,7 @@ class animal locomotion "Passing lists as key for xs is deprecated and will be removed in a " "future version. Pass key as a tuple instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if level is not None: @@ -4145,7 +4148,11 @@ def _check_setitem_copy(self, t="setting", force=False): if value == "raise": raise SettingWithCopyError(t) elif value == "warn": - warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) + warnings.warn( + t, + SettingWithCopyWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) def __delitem__(self, key) -> None: """ @@ -5883,7 +5890,7 @@ def __setattr__(self, name: str, value) -> None: "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) object.__setattr__(self, name, value) @@ -8262,7 +8269,7 @@ def between_time( "`include_start` and `include_end` are deprecated in " "favour of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) left = True if include_start is lib.no_default else include_start right = True if include_end is lib.no_default else include_end @@ -8978,7 +8985,7 @@ def rank( "and will raise in a future version. Pass either 'True' or " "'False'. 'False' will be the default.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) warned = True elif numeric_only is lib.no_default: @@ -9034,7 +9041,7 @@ def ranker(data): "is deprecated; in a future version this will raise TypeError. " "Select only valid columns before calling rank.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if numeric_only: @@ -9045,7 +9052,7 @@ def ranker(data): f"{self.dtype} is deprecated and will raise a TypeError in a " "future version of pandas", category=FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) data = self._get_numeric_data() else: @@ -9816,7 +9823,7 @@ def where( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._where(cond, other, inplace, axis, level) @@ -9894,7 +9901,7 @@ def mask( "try_cast keyword is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # see gh-21891 @@ -10089,7 +10096,9 @@ def slice_shift(self: NDFrameT, periods: int = 1, axis=0) -> NDFrameT: "and will be removed in a future version. " "You can use DataFrame/Series.shift instead." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) if periods == 0: return self @@ -10142,7 +10151,7 @@ def tshift(self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0) -> NDFra "Please use shift instead." 
), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if freq is None: @@ -10975,7 +10984,7 @@ def _logical_func( "deprecated and will be removed in a future version. Use groupby " "instead. df.any(level=1) should use df.groupby(level=1).any()", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if bool_only is not None: raise NotImplementedError( @@ -11109,7 +11118,7 @@ def _stat_function_ddof( "deprecated and will be removed in a future version. Use groupby " "instead. df.var(level=1) should use df.groupby(level=1).var().", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, ddof=ddof @@ -11183,7 +11192,7 @@ def _stat_function( f"scalar {name} over the entire DataFrame. To retain the old " f"behavior, use 'frame.{name}(axis=0)' or just 'frame.{name}()'", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if axis is lib.no_default: axis = None @@ -11196,7 +11205,7 @@ def _stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. df.median(level=1) should use df.groupby(level=1).median().", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._agg_by_level( name, axis=axis, level=level, skipna=skipna, numeric_only=numeric_only @@ -11320,7 +11329,7 @@ def _min_count_stat_function( "deprecated and will be removed in a future version. Use groupby " "instead. df.sum(level=1) should use df.groupby(level=1).sum().", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._agg_by_level( name, @@ -11408,7 +11417,9 @@ def mad( "The 'mad' method is deprecated and will be removed in a future version. " "To compute the same result, you may do `(df - df.mean()).abs().mean()`." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) if not is_bool(skipna): warnings.warn( @@ -11416,7 +11427,7 @@ def mad( "version. Pass True instead. Only boolean values will be allowed " "in the future.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) skipna = True if axis is None: @@ -11427,7 +11438,7 @@ def mad( "deprecated and will be removed in a future version. Use groupby " "instead. 
df.mad(level=1) should use df.groupby(level=1).mad()", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna) @@ -11874,7 +11885,7 @@ def expanding( warnings.warn( "The `center` argument on `expanding` will be removed in the future.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: center = False diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 0f9befa7cff78..8a261f09e7118 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -9,6 +9,7 @@ from collections import abc from functools import partial +import inspect from textwrap import dedent from typing import ( TYPE_CHECKING, @@ -1235,7 +1236,7 @@ def _transform_general(self, func, *args, **kwargs): "`.to_numpy()` to the result in the transform function to keep " "the current behavior and silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) concat_index = obj.columns if self.axis == 0 else obj.index @@ -1405,7 +1406,7 @@ def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: "Indexing with multiple keys (implicitly converted to a tuple " "of keys) will be deprecated, use a list instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return super().__getitem__(key) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8e0ed959fabc3..138474e21fb57 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -833,7 +833,7 @@ def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: "to avoid this warning." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.grouper.get_iterator(self._selected_obj, axis=self.axis) @@ -1357,7 +1357,7 @@ def _resolve_numeric_only( f"numeric_only={numeric_only} and dtype {self.obj.dtype}. This will " "raise a TypeError in a future version of pandas", category=FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) raise NotImplementedError( f"{type(self).__name__}.{how} does not implement numeric_only" @@ -1619,7 +1619,9 @@ def _python_apply_general( "To adopt the future behavior and silence this warning, use " "\n\n\t>>> .groupby(..., group_keys=True)" ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) # We want to behave as if `self.group_keys=False` when reconstructing # the object. However, we don't want to mutate the stateful GroupBy # object, so we just override it. @@ -2940,7 +2942,7 @@ def pad(self, limit=None): "pad is deprecated and will be removed in a future version. " "Use ffill instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.ffill(limit=limit) @@ -2976,7 +2978,7 @@ def backfill(self, limit=None): "backfill is deprecated and will be removed in a future version. 
" "Use bfill instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.bfill(limit=limit) @@ -4362,7 +4364,7 @@ def warn_dropping_nuisance_columns_deprecated(cls, how: str, numeric_only) -> No f"Before calling .{how}, select only columns which " "should be valid for the function.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) elif numeric_only is lib.no_default: warnings.warn( @@ -4372,5 +4374,5 @@ def warn_dropping_nuisance_columns_deprecated(cls, how: str, numeric_only) -> No f"Either specify numeric_only or select only columns which " "should be valid for the function.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index b9f4166b475ca..04ebc00b8e964 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -4,6 +4,7 @@ """ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, Any, @@ -981,7 +982,7 @@ def _check_deprecated_resample_kwargs(kwargs, origin): "\nbecomes:\n" '\n>>> df.resample(freq="3s", offset="2s")\n', FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if kwargs.get("loffset", None) is not None: warnings.warn( @@ -992,5 +993,5 @@ def _check_deprecated_resample_kwargs(kwargs, origin): '\n>>> df = df.resample(freq="3s").mean()' '\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n', FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) diff --git a/pandas/core/index.py b/pandas/core/index.py index 19e9c6b27e4e7..519a82d680426 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1,6 +1,7 @@ # pyright: reportUnusedImport = false from __future__ import annotations +import inspect import warnings from pandas.util._exceptions import find_stack_level @@ -31,7 +32,7 @@ "pandas.core.index is deprecated and will be removed in a future version. " "The public classes are available in the top-level namespace.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) __all__: list[str] = [] diff --git a/pandas/core/indexers/utils.py b/pandas/core/indexers/utils.py index 0f3cdc4195c85..70e6ff8ab7783 100644 --- a/pandas/core/indexers/utils.py +++ b/pandas/core/indexers/utils.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, Any, @@ -348,7 +349,7 @@ def deprecate_ndim_indexing(result, stacklevel: int = 3) -> None: "is deprecated and will be removed in a future " "version. Convert to a numpy array before indexing instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) @@ -372,7 +373,7 @@ def unpack_1tuple(tup): "slice is deprecated and will raise in a future " "version. 
Pass a tuple instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return tup[0] diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 46959aa5cd3e2..dea38e2ed2907 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect from typing import TYPE_CHECKING import warnings @@ -291,7 +292,7 @@ def weekofyear(self): "Series.dt.weekofyear and Series.dt.week have been deprecated. " "Please use Series.dt.isocalendar().week instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) week_series = self.isocalendar().week week_series.name = self.name diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 14f9b71c5e03c..7e2a9184f04d9 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2,6 +2,7 @@ from datetime import datetime import functools +import inspect from itertools import zip_longest import operator from typing import ( @@ -437,7 +438,7 @@ def __new__( "'tupleize_cols' is deprecated and will raise TypeError in a " "future version. Use the specific Index subclass directly instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.arrays import PandasArray @@ -615,7 +616,7 @@ def _dtype_to_subclass(cls, dtype: DtypeObj): "dense numpy ndarray. To retain the old behavior, use " "pd.Index(arr.to_numpy()) instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return cls._dtype_to_subclass(dtype.subtype) @@ -683,7 +684,7 @@ def asi8(self): warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return None @@ -797,7 +798,7 @@ def _get_attributes_dict(self) -> dict[str_t, Any]: "The Index._get_attributes_dict method is deprecated, and will be " "removed in a future version", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return {k: getattr(self, k, None) for k in self._attributes} @@ -1000,7 +1001,7 @@ def ravel(self, order="C"): "Index.ravel returning ndarray is deprecated; in a future version " "this will return a view on self.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if needs_i8_conversion(self.dtype): # Item "ndarray[Any, Any]" of "Union[ExtensionArray, ndarray[Any, Any]]" @@ -1295,7 +1296,7 @@ def copy( "parameter names is deprecated and will be removed in a future " "version. Use the name parameter instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) name = self._validate_names(name=name, names=names, deep=deep)[0] @@ -1310,7 +1311,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) new_index = new_index.astype(dtype) return new_index @@ -1502,7 +1503,7 @@ def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: "The 'to_native_types' method is deprecated and will be removed in " "a future version. 
Use 'astype(str)' instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) values = self if slicer is not None: @@ -1703,7 +1704,7 @@ def to_frame( "the future `None` will be used as the name of the resulting " "DataFrame column.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) name = lib.no_default @@ -2289,7 +2290,7 @@ def is_monotonic(self) -> bool: "is_monotonic is deprecated and will be removed in a future version. " "Use is_monotonic_increasing instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.is_monotonic_increasing @@ -2714,7 +2715,7 @@ def is_mixed(self) -> bool: "Index.is_mixed is deprecated and will be removed in a future version. " "Check index.inferred_type directly instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.inferred_type in ["mixed"] @@ -2759,7 +2760,7 @@ def is_all_dates(self) -> bool: "Index.is_all_dates is deprecated, will be removed in a future version. " "check index.inferred_type instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._is_all_dates @@ -3140,7 +3141,7 @@ def __and__(self, other): "in the future this will be a logical operation matching " "Series.__and__. Use index.intersection(other) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.intersection(other) @@ -3151,7 +3152,7 @@ def __or__(self, other): "in the future this will be a logical operation matching " "Series.__or__. Use index.union(other) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.union(other) @@ -3162,7 +3163,7 @@ def __xor__(self, other): "in the future this will be a logical operation matching " "Series.__xor__. Use index.symmetric_difference(other) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.symmetric_difference(other) @@ -3218,7 +3219,7 @@ def _deprecate_dti_setop(self, other: Index, setop: str_t): "object dtype. To retain the old behavior, " f"use `index.astype(object).{setop}(other)`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) @final @@ -3794,7 +3795,7 @@ def get_loc(self, key, method=None, tolerance=None): "and will raise in a future version. Use " "index.get_indexer([item], method=...) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if is_scalar(key) and isna(key) and not self.hasnans: @@ -4258,7 +4259,7 @@ def is_int(v): "lookups. To retain the old behavior, use `series.iloc[i:j]`. " "To get the future behavior, use `series.loc[i:j]`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if self.is_integer() or is_index_slice: # Note: these checks are redundant if we know is_index_slice @@ -4292,7 +4293,7 @@ def is_int(v): "and will raise TypeError in a future version. 
" "Use .loc with labels or .iloc with positions instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) indexer = key else: @@ -4443,7 +4444,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and " "will raise in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) target = self._wrap_reindex_result(target, indexer, preserve_names) @@ -5257,7 +5258,7 @@ def is_type_compatible(self, kind: str_t) -> bool: "Index.is_type_compatible is deprecated and will be removed in a " "future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return kind == self.inferred_type @@ -5904,7 +5905,7 @@ def get_value(self, series: Series, key): "get_value is deprecated and will be removed in a future version. " "Use Series[key] instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self._check_indexing_error(key) @@ -5972,7 +5973,7 @@ def set_value(self, arr, key, value) -> None: "will be removed in a future version." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) loc = self._engine.get_loc(key) if not can_hold_element(arr, value): @@ -7238,7 +7239,7 @@ def _deprecated_arg(self, value, name: str_t, methodname: str_t) -> None: f"'{name}' argument in {methodname} is deprecated " "and will be removed in a future version. Do not pass it.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) @@ -7492,6 +7493,6 @@ def _maybe_try_sort(result, sort): warnings.warn( f"{err}, sort order is undefined for incomparable objects.", RuntimeWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return result diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index c1ae3cb1b16ea..e068c1434fd4e 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect from typing import ( Any, Hashable, @@ -224,7 +225,7 @@ def __new__( "deprecated and will raise in a future version. " "Use CategoricalIndex([], ...) 
instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) data = [] @@ -419,7 +420,7 @@ def reindex( "reindexing with a non-unique Index is deprecated and will " "raise in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) new_target: Index @@ -495,7 +496,7 @@ def take_nd(self, *args, **kwargs) -> CategoricalIndex: "CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take " "instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.take(*args, **kwargs) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8014d010afc1b..84955d5137383 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -4,6 +4,7 @@ from __future__ import annotations from datetime import datetime +import inspect from typing import ( TYPE_CHECKING, Any, @@ -393,7 +394,7 @@ def is_type_compatible(self, kind: str) -> bool: f"{type(self).__name__}.is_type_compatible is deprecated and will be " "removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return kind in self._data._infer_matches diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 30c770f32c2dc..2625d8c683a0c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -7,6 +7,7 @@ timedelta, tzinfo, ) +import inspect import operator from typing import ( TYPE_CHECKING, @@ -423,7 +424,7 @@ def union_many(self, others): "DatetimeIndex.union_many is deprecated and will be removed in " "a future version. Use obj.union instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) this = self @@ -547,7 +548,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "is deprecated and will be removed in a future version. " "You can stop passing 'keep_tz' to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: warnings.warn( @@ -557,7 +558,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): "can do 'idx.tz_convert(None)' before calling " "'to_series'.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: keep_tz = True @@ -660,7 +661,9 @@ def _deprecate_mismatched_indexing(self, key, one_way: bool = False) -> None: "raise KeyError in a future version. " "Use a timezone-aware object instead." 
) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) def get_loc(self, key, method=None, tolerance=None): """ @@ -809,7 +812,7 @@ def check_str_or_none(point): "with non-existing keys is deprecated and will raise a " "KeyError in a future Version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) indexer = mask.nonzero()[0][::step] if len(indexer) == len(self): @@ -1089,7 +1092,7 @@ def date_range( warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if closed is None: inclusive = "both" diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 23f2e724e208c..5ed8f79bbbefe 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,6 +1,7 @@ """ define the IntervalIndex """ from __future__ import annotations +import inspect from operator import ( le, lt, @@ -242,7 +243,7 @@ def closed(self): warnings.warn( "Attribute `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.inclusive diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5a9b1e6943608..b282455cf6051 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,6 +1,7 @@ from __future__ import annotations from functools import wraps +import inspect from sys import getsizeof from typing import ( TYPE_CHECKING, @@ -923,7 +924,7 @@ def set_levels( warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1084,7 +1085,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity: bool = Tr warnings.warn( "inplace is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False @@ -1197,7 +1198,7 @@ def copy( "parameter levels is deprecated and will be removed in a future " "version. Use the set_levels method instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) keep_id = False if codes is not None: @@ -1205,7 +1206,7 @@ def copy( "parameter codes is deprecated and will be removed in a future " "version. Use the set_codes method instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) keep_id = False @@ -1237,7 +1238,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. 
Use the astype method instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) new_index = new_index.astype(dtype) return new_index @@ -1800,7 +1801,7 @@ def to_frame( "the future `None` will be used as the name of the resulting " "DataFrame column.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) name = lib.no_default @@ -1869,7 +1870,7 @@ def is_lexsorted(self) -> bool: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._is_lexsorted() @@ -1913,7 +1914,7 @@ def lexsort_depth(self) -> int: "MultiIndex.is_lexsorted is deprecated as a public function, " "users should use MultiIndex.is_monotonic_increasing instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._lexsort_depth @@ -2279,7 +2280,7 @@ def drop(self, codes, level=None, errors="raise"): "dropping on a non-lexsorted multi-index " "without a level parameter may impact performance.", PerformanceWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) loc = loc.nonzero()[0] inds.extend(loc) @@ -2955,7 +2956,7 @@ def _maybe_to_slice(loc): warnings.warn( "indexing past lexsort depth may impact performance.", PerformanceWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) loc = np.arange(start, stop, dtype=np.intp) @@ -3394,7 +3395,7 @@ def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]: # TODO: how to handle IntervalIndex level? # (no test cases) FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) continue else: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index a597bea0eb724..d114fe47fa0f1 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect from typing import ( Callable, Hashable, @@ -360,7 +361,7 @@ def asi8(self) -> npt.NDArray[np.int64]: warnings.warn( "Index.asi8 is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._values.view(self._default_dtype) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index c034d9416eae7..fedcba7aa9644 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -4,6 +4,7 @@ datetime, timedelta, ) +import inspect from typing import Hashable import warnings @@ -366,7 +367,7 @@ def astype(self, dtype, copy: bool = True, how=lib.no_default): "will be removed in a future version. 
" "Use index.to_timestamp(how=how) instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: how = "start" diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 376c98b6e176f..9f49c7456d9ce 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,6 +1,7 @@ from __future__ import annotations from datetime import timedelta +import inspect import operator from sys import getsizeof from typing import ( @@ -263,7 +264,7 @@ def _start(self) -> int: warnings.warn( self._deprecation_message.format("_start", "start"), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.start @@ -286,7 +287,7 @@ def _stop(self) -> int: warnings.warn( self._deprecation_message.format("_stop", "stop"), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.stop @@ -310,7 +311,7 @@ def _step(self) -> int: warnings.warn( self._deprecation_message.format("_step", "step"), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.step @@ -471,7 +472,7 @@ def copy( "parameter dtype is deprecated and will be removed in a future " "version. Use the astype method instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) new_index = new_index.astype(dtype) return new_index diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 4e242e33627a4..b143e1e50aa6c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1,6 +1,7 @@ from __future__ import annotations from contextlib import suppress +import inspect from typing import ( TYPE_CHECKING, Hashable, @@ -1497,7 +1498,7 @@ def _has_valid_setitem_indexer(self, indexer) -> bool: "a future version.\n" "consider using .loc with a DataFrame indexer for automatic alignment.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if not isinstance(indexer, tuple): @@ -2026,7 +2027,7 @@ def _setitem_single_column(self, loc: int, value, plane_indexer): "`df[df.columns[i]] = newvals` or, if columns are non-unique, " "`df.isetitem(i, newvals)`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # TODO: how to get future behavior? # TODO: what if we got here indirectly via loc? @@ -2502,7 +2503,7 @@ def convert_to_index_sliceable(obj: DataFrame, key): "and will be removed in a future version. Use `frame.loc[string]` " "instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return res except (KeyError, ValueError, NotImplementedError): @@ -2656,7 +2657,7 @@ def check_deprecated_indexers(key) -> None: "Passing a set as an indexer is deprecated and will raise in " "a future version. Use a list instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if ( isinstance(key, dict) @@ -2667,5 +2668,5 @@ def check_deprecated_indexers(key) -> None: "Passing a dict as an indexer is deprecated and will raise in " "a future version. 
Use a list instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index ea69b567611e4..8c62576c2f2ca 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -41,6 +41,7 @@ def __getattr__(name: str): + import inspect import warnings from pandas.util._exceptions import find_stack_level @@ -50,7 +51,7 @@ def __getattr__(name: str): "CategoricalBlock is deprecated and will be removed in a future version. " "Use ExtensionBlock instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.internals.blocks import CategoricalBlock diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 46c375b92dd83..3e27cf0b15511 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1,6 +1,7 @@ from __future__ import annotations from functools import wraps +import inspect import re from typing import ( TYPE_CHECKING, @@ -181,7 +182,7 @@ def is_categorical(self) -> bool: "future version. Use isinstance(block.values, Categorical) " "instead. See https://github.com/pandas-dev/pandas/issues/40226", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return isinstance(self.values, Categorical) @@ -252,7 +253,7 @@ def make_block_same_class( "already been cast to DatetimeArray and TimedeltaArray, " "respectively.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) values = new_values @@ -1564,7 +1565,7 @@ def fillna( "(usually object) instead of raising, matching the " "behavior of other dtypes.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) raise else: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index c1d0ab730fe7e..6aad8dbd940d4 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -5,6 +5,7 @@ from __future__ import annotations from collections import abc +import inspect from typing import ( TYPE_CHECKING, Any, @@ -844,7 +845,7 @@ def to_arrays( "To retain the old behavior, pass as a dictionary " "DataFrame({col: categorical, ..})", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if columns is None: columns = default_index(len(data)) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 435992f7d5cff..4e84b013b2a11 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import itertools from typing import ( Any, @@ -909,7 +910,7 @@ def __init__( "will assume that a DatetimeTZBlock with block.ndim==2 " "has block.values.ndim == 2.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # error: Incompatible types in assignment (expression has type @@ -1248,7 +1249,7 @@ def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: "Consider joining all columns at once using pd.concat(axis=1) " "instead. 
To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) def _insert_update_mgr_locs(self, loc) -> None: @@ -1707,7 +1708,7 @@ def __init__( "The `fastpath` keyword is deprecated and will be removed " "in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self.axes = [axis] diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index e9fefd9268870..dde4d07b7915c 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,6 +5,7 @@ """ from __future__ import annotations +import inspect import operator from typing import TYPE_CHECKING import warnings @@ -301,7 +302,7 @@ def to_series(right): "Do `left, right = left.align(right, axis=1, copy=False)` " "before e.g. `left == right`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) left, right = left.align( diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 54b6b32ff1a68..e3d81e01ac94c 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -2,6 +2,7 @@ import copy from datetime import timedelta +import inspect from textwrap import dedent from typing import ( TYPE_CHECKING, @@ -549,7 +550,7 @@ def pad(self, limit=None): "pad is deprecated and will be removed in a future version. " "Use ffill instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.ffill(limit=limit) @@ -722,7 +723,7 @@ def backfill(self, limit=None): "backfill is deprecated and will be removed in a future version. " "Use bfill instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.bfill(limit=limit) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 5328c7995ea6f..3d9e4f0c69c62 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -4,6 +4,7 @@ from __future__ import annotations from collections import abc +import inspect from typing import ( TYPE_CHECKING, Callable, @@ -552,7 +553,7 @@ def __init__( "Passing non boolean values for sort is deprecated and " "will error in a future version!", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self.sort = sort diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 5de9c8e2f4108..73f6aff82f330 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import re from typing import TYPE_CHECKING import warnings @@ -59,7 +60,7 @@ def melt( "In the future this will raise an error, please set the 'value_name' " "parameter of DataFrame.melt to a unique name.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if id_vars is not None: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4b8547bd6a232..cb392eee1d589 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -6,6 +6,7 @@ import copy import datetime from functools import partial +import inspect import string from typing import ( TYPE_CHECKING, @@ -678,7 +679,9 @@ def __init__( ) # stacklevel chosen to be correct when this is reached via pd.merge # (and not DataFrame.join) - warnings.warn(msg, FutureWarning, 
stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) self._validate_specification() @@ -2369,7 +2372,7 @@ def _items_overlap_with_suffix( "unexpected results. Provide 'suffixes' as a tuple instead. In the " "future a 'TypeError' will be raised.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) to_rename = left.intersection(right) @@ -2419,7 +2422,7 @@ def renamer(x, suffix): f"Passing 'suffixes' which cause duplicate columns {set(dups)} in the " f"result is deprecated and will raise a MergeError in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return llabels, rlabels diff --git a/pandas/core/series.py b/pandas/core/series.py index 206fcbe05d006..6f3769b53ad26 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect from textwrap import dedent from typing import ( IO, @@ -389,7 +390,7 @@ def __init__( "of 'float64' in a future version. Specify a dtype explicitly " "to silence this warning.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # uncomment the line below when removing the FutureWarning # dtype = np.dtype(object) @@ -916,7 +917,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> Series: "is_copy is deprecated and will be removed in a future version. " "'take' always returns a copy, so there is no need to specify this.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) nv.validate_take((), kwargs) @@ -1047,7 +1048,9 @@ def _get_values_tuple(self, key: tuple): # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) - deprecate_ndim_indexing(result, stacklevel=find_stack_level()) + deprecate_ndim_indexing( + result, stacklevel=find_stack_level(inspect.currentframe()) + ) return result if not isinstance(self.index, MultiIndex): @@ -1111,7 +1114,7 @@ def __setitem__(self, key, value) -> None: "Series. Use `series.iloc[an_int] = val` to treat the " "key as positional.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 @@ -1797,7 +1800,7 @@ def iteritems(self) -> Iterable[tuple[Hashable, Any]]: "iteritems is deprecated and will be removed in a future version. " "Use .items instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.items() @@ -1880,7 +1883,7 @@ def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: "the future `None` will be used as the name of the resulting " "DataFrame column.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) name = lib.no_default @@ -2018,7 +2021,7 @@ def groupby( "will be removed in a future version." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) else: squeeze = False @@ -2078,7 +2081,7 @@ def count(self, level=None): "deprecated and will be removed in a future version. Use groupby " "instead. 
ser.count(level=1) should use ser.groupby(level=1).count().", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if not isinstance(self.index, MultiIndex): raise ValueError("Series.count level is only valid with a MultiIndex") @@ -3076,7 +3079,7 @@ def append( "and will be removed from pandas in a future version. " "Use pandas.concat instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._append(to_append, ignore_index, verify_integrity) @@ -4733,7 +4736,7 @@ def _reduce( f"Calling Series.{name} with {kwd_name}={numeric_only} and " f"dtype {self.dtype} will raise a TypeError in the future", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) raise NotImplementedError( f"Series.{name} does not implement {kwd_name}." @@ -5611,7 +5614,7 @@ def between(self, left, right, inclusive="both") -> Series: "Boolean inputs to the `inclusive` argument are deprecated in " "favour of `both` or `neither`.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if inclusive: inclusive = "both" diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index d50daad9a22b1..b66660f315df1 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -2,6 +2,7 @@ import codecs from functools import wraps +import inspect import re from typing import ( TYPE_CHECKING, @@ -242,7 +243,7 @@ def __iter__(self): warnings.warn( "Columnar iteration over characters will be deprecated in future releases.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) i = 0 g = self.get(i) @@ -1244,7 +1245,7 @@ def contains(self, pat, case=True, flags=0, na=None, regex=True): "This pattern is interpreted as a regular expression, and has " "match groups. To actually get the groups, use str.extract.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) result = self._data.array._str_contains(pat, case, flags, na, regex) @@ -1456,7 +1457,11 @@ def replace( " In addition, single character regular expressions will " "*not* be treated as literal strings when regex=True." ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) # Check whether repl is valid (GH 13438, GH 15055) if not (isinstance(repl, str) or callable(repl)): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 7ec4bc1016a9d..388f751d9cc57 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -3,6 +3,7 @@ from collections import abc from datetime import datetime from functools import partial +import inspect from itertools import islice from typing import ( TYPE_CHECKING, @@ -1284,7 +1285,7 @@ def to_time(arg, format=None, infer_time_format=False, errors="raise"): "`to_time` has been moved, should be imported from pandas.core.tools.times. 
" "This alias will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) from pandas.core.tools.times import to_time diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index e31b5c60a37ee..29b7558f40353 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -2,6 +2,7 @@ from __future__ import annotations from collections import defaultdict +import inspect from typing import cast import warnings @@ -203,5 +204,5 @@ def maybe_warn_args_and_kwargs(cls, kernel: str, args, kwargs) -> None: "no impact on the result and is deprecated. This will " "raise a TypeError in a future version of pandas.", category=FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 020ca71050015..32559d0d88bcf 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -2,6 +2,7 @@ import datetime from functools import partial +import inspect from textwrap import dedent from typing import ( TYPE_CHECKING, @@ -391,7 +392,7 @@ def __init__( "into times instead." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # self.times cannot be str anymore self.times = cast("Series", self._selected_obj[self.times]) @@ -683,7 +684,7 @@ def vol(self, bias: bool = False, *args, **kwargs): "Use std instead." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.std(bias, *args, **kwargs) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index c92c448304de2..3fc48b121419a 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -171,7 +171,7 @@ def win_type(self): "win_type will no longer return 'freq' in a future version. " "Check the type of self.window instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return "freq" return self._win_type @@ -181,7 +181,7 @@ def is_datetimelike(self) -> bool: warnings.warn( "is_datetimelike is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._win_freq_i8 is not None @@ -189,7 +189,7 @@ def validate(self) -> None: warnings.warn( "validate is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._validate() @@ -549,7 +549,7 @@ def hfunc(values: ArrayLike) -> ArrayLike: "Select only valid columns before calling the operation. " f"Dropped columns were {dropped}", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self._resolve_output(df, obj) @@ -1967,7 +1967,7 @@ def count(self, numeric_only: bool = False): "Specify min_periods=0 instead." 
), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self.min_periods = 0 result = super().count() diff --git a/pandas/io/common.py b/pandas/io/common.py index d911499aa848e..7add6ec10222c 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -10,6 +10,7 @@ import dataclasses import functools import gzip +import inspect from io import ( BufferedIOBase, BytesIO, @@ -322,7 +323,7 @@ def _get_filepath_or_buffer( warnings.warn( "compression has no effect when passing a non-binary object as input.", RuntimeWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) compression_method = None diff --git a/pandas/io/date_converters.py b/pandas/io/date_converters.py index 85e92da8c2a54..5885a3b9d14d7 100644 --- a/pandas/io/date_converters.py +++ b/pandas/io/date_converters.py @@ -1,6 +1,7 @@ """This module is designed for community supported date conversion functions""" from __future__ import annotations +import inspect import warnings import numpy as np @@ -22,7 +23,7 @@ def parse_date_time(date_col, time_col) -> npt.NDArray[np.object_]: Use pd.to_datetime(date_col + " " + time_col).to_pydatetime() instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) date_col = _maybe_cast(date_col) time_col = _maybe_cast(time_col) @@ -42,7 +43,7 @@ def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]: np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) year_col = _maybe_cast(year_col) @@ -69,7 +70,7 @@ def parse_all_fields( np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. """, # noqa: E501 FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) year_col = _maybe_cast(year_col) @@ -95,7 +96,7 @@ def generic_parser(parse_func, *cols) -> np.ndarray: Use pd.to_datetime instead. 
""", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) N = _check_columns(cols) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 44152f100d390..2fda6d239d85b 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -3,6 +3,7 @@ import abc import datetime from functools import partial +import inspect from io import BytesIO import os from textwrap import fill @@ -719,7 +720,7 @@ def parse( warnings.warn( "convert_float is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) validate_header_arg(header) @@ -1122,7 +1123,7 @@ def __new__( warnings.warn( "Use of **kwargs is deprecated, use engine_kwargs instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # only switch class if generic(ExcelWriter) @@ -1156,7 +1157,7 @@ def __new__( "deprecated and will also raise a warning, it can " "be globally set and the warning suppressed.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) # for mypy @@ -1326,7 +1327,7 @@ def _deprecate(self, attr: str): f"{attr} is not part of the public API, usage can give in unexpected " "results and will be removed in a future version", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) @property diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index df246ad30d806..0522e113d6525 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -6,6 +6,7 @@ from contextlib import contextmanager import copy from functools import partial +import inspect import operator from typing import ( Any, @@ -443,7 +444,7 @@ def render( warnings.warn( "this method is deprecated in favour of `Styler.to_html()`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if sparse_index is None: sparse_index = get_option("styler.sparse.index") @@ -2123,7 +2124,7 @@ def where( warnings.warn( "this method is deprecated in favour of `Styler.applymap()`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if other is None: @@ -2155,7 +2156,7 @@ def set_precision(self, precision: int) -> StylerRenderer: warnings.warn( "this method is deprecated in favour of `Styler.format(precision=..)`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self.precision = precision return self.format(precision=precision, na_rep=self.na_rep) @@ -2667,7 +2668,7 @@ def set_na_rep(self, na_rep: str) -> StylerRenderer: warnings.warn( "this method is deprecated in favour of `Styler.format(na_rep=..)`", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) self.na_rep = na_rep return self.format(na_rep=na_rep, precision=self.precision) @@ -2721,7 +2722,7 @@ def hide_index( warnings.warn( 'this method is deprecated in favour of `Styler.hide(axis="index")`', FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return self.hide(axis="index", level=level, subset=subset, names=names) @@ -2774,7 +2775,7 @@ def hide_columns( warnings.warn( 'this method is deprecated in favour of `Styler.hide(axis="columns")`', FutureWarning, - stacklevel=find_stack_level(), + 
stacklevel=find_stack_level(inspect.currentframe()), ) return self.hide(axis="columns", level=level, subset=subset, names=names) @@ -3381,7 +3382,7 @@ def f(data: DataFrame, props: str) -> np.ndarray: warnings.warn( "`null_color` is deprecated: use `color` instead", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if color is None and null_color == lib.no_default: diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 0e40e47bf7cb1..89bf903fea8dd 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -5,6 +5,7 @@ import csv import datetime from enum import Enum +import inspect import itertools from typing import ( TYPE_CHECKING, @@ -559,7 +560,7 @@ def _convert_to_ndarrays( f"for column {c} - only the converter will be used." ), ParserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) try: @@ -857,7 +858,7 @@ def _check_data_length( "Length of header or names does not match length of data. This leads " "to a loss of data with index_col=False.", ParserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) @overload diff --git a/pandas/io/parsers/c_parser_wrapper.py b/pandas/io/parsers/c_parser_wrapper.py index aec999e40b0f5..99051ec661413 100644 --- a/pandas/io/parsers/c_parser_wrapper.py +++ b/pandas/io/parsers/c_parser_wrapper.py @@ -1,6 +1,7 @@ from __future__ import annotations from collections import defaultdict +import inspect from io import TextIOWrapper from typing import ( TYPE_CHECKING, @@ -421,7 +422,11 @@ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: f"Specify dtype option on import or set low_memory=False." ] ) - warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) + warnings.warn( + warning_message, + DtypeWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return result diff --git a/pandas/io/parsers/python_parser.py b/pandas/io/parsers/python_parser.py index 7c03a81dbc0e6..ff8c21ab89f30 100644 --- a/pandas/io/parsers/python_parser.py +++ b/pandas/io/parsers/python_parser.py @@ -5,6 +5,7 @@ defaultdict, ) import csv +import inspect from io import StringIO import re import sys @@ -599,7 +600,7 @@ def _handle_usecols( "Defining usecols with out of bounds indices is deprecated " "and will raise a ParserError in a future version.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) col_indices = self.usecols diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index dc4556542d8e2..03a634cf07e26 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -5,6 +5,7 @@ from collections import abc import csv +import inspect import sys from textwrap import fill from typing import ( @@ -1609,7 +1610,7 @@ def _clean_options( "engine='python'." ), ParserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) index_col = options["index_col"] @@ -1628,7 +1629,11 @@ def _clean_options( f"The {arg} argument has been deprecated and will be " f"removed in a future version. 
{depr_default.msg}\n\n" ) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) else: result[arg] = parser_default @@ -2203,7 +2208,9 @@ def _merge_with_dialect_properties( if conflict_msgs: warnings.warn( - "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + "\n\n".join(conflict_msgs), + ParserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) kwds[param] = dialect_val return kwds diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index a4049eff8ae71..f7d5fb9270247 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -10,6 +10,7 @@ date, tzinfo, ) +import inspect import itertools import os import re @@ -686,7 +687,7 @@ def iteritems(self): "iteritems is deprecated and will be removed in a future version. " "Use .items instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) yield from self.items() @@ -2195,7 +2196,9 @@ def update_info(self, info) -> None: if key in ["freq", "index_name"]: ws = attribute_conflict_doc % (key, existing_value, value) warnings.warn( - ws, AttributeConflictWarning, stacklevel=find_stack_level() + ws, + AttributeConflictWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) # reset @@ -3093,7 +3096,11 @@ def write_array( pass else: ws = performance_doc % (inferred_type, key, items) - warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) + warnings.warn( + ws, + PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 71fecba4340ac..2b835a1e7ebed 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -12,6 +12,7 @@ time, ) from functools import partial +import inspect import re from typing import ( TYPE_CHECKING, @@ -1193,7 +1194,7 @@ def _sqlalchemy_type(self, col): "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return BigInteger elif col_type == "floating": @@ -1962,7 +1963,7 @@ def _sql_type_name(self, col): "the 'timedelta' type is not supported, and will be " "written as integer values (ns frequency) to the database.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) col_type = "integer" diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index a8b1e4c572c43..c5119205d1861 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -1,6 +1,7 @@ # being a bit too dynamic from __future__ import annotations +import inspect from math import ceil from typing import ( TYPE_CHECKING, @@ -239,7 +240,7 @@ def create_subplots( "When passing multiple axes, sharex and sharey " "are ignored. 
These settings must be specified when creating axes.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) if ax.size == naxes: fig = ax.flat[0].get_figure() @@ -262,7 +263,7 @@ def create_subplots( "To output multiple subplots, the figure containing " "the passed axes is being cleared.", UserWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) fig.clear() diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index b2fbc022b2708..35174d92b4125 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import warnings import numpy as np @@ -116,7 +117,7 @@ def get_offset(name: str) -> BaseOffset: "get_offset is deprecated and will be removed in a future version, " "use to_offset instead.", FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return _get_offset(name) diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index f8359edaa8d44..86c945f1321f5 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -312,7 +312,7 @@ def wrapper(*args, **kwargs): warnings.warn( msg.format(arguments=arguments), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), ) return func(*args, **kwargs) diff --git a/pandas/util/_exceptions.py b/pandas/util/_exceptions.py index c718451fbf621..fcd191e25ced5 100644 --- a/pandas/util/_exceptions.py +++ b/pandas/util/_exceptions.py @@ -1,6 +1,7 @@ from __future__ import annotations import contextlib +import functools import inspect import os from typing import Iterator @@ -25,10 +26,16 @@ def rewrite_exception(old_name: str, new_name: str) -> Iterator[None]: raise -def find_stack_level() -> int: [email protected]_cache +def find_stack_level(frame) -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). + + ``frame`` should be passed as ``inspect.currentframe()`` by the + calling function. + + https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow """ import pandas as pd @@ -36,9 +43,7 @@ def find_stack_level() -> int: pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") - # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow - frame = inspect.currentframe() - n = 0 + n = 1 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index a4a9ebfbf4126..7e938e4648e97 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -4,6 +4,7 @@ """ from __future__ import annotations +import inspect from typing import ( Any, Iterable, @@ -355,7 +356,9 @@ def validate_axis_style_args( "positional arguments for 'index' or 'columns' will raise " "a 'TypeError'." 
) - warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) out[data._get_axis_name(0)] = args[0] out[data._get_axis_name(1)] = args[1] else: diff --git a/pandas/util/testing.py b/pandas/util/testing.py index db9bfc274cd78..5585ea0b58628 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1,3 +1,4 @@ +import inspect import warnings from pandas.util._exceptions import find_stack_level @@ -10,5 +11,5 @@ "public API at pandas.testing instead." ), FutureWarning, - stacklevel=find_stack_level(), + stacklevel=find_stack_level(inspect.currentframe()), )
This would be a precursor to #47998 and #47828. If `find_stack_level` is called repeatedly, performance takes a hit.

Demo of the impact: if we run this file

```python
import cProfile
import pstats

import pandas as pd

data = pd.date_range('1990', '2000').strftime('%d/%m/%Y').tolist()

def fun():
    pd.to_datetime(data, dayfirst=False)

cProfile.runctx("fun()", globals(), locals(), "Profile.prof")

s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
```

On main:

```
# 1199 function calls (1187 primitive calls) in 0.046 seconds
#
# Ordered by: internal time
#
# ncalls  tottime  percall  cumtime  percall filename:lineno(function)
# 1       0.034    0.034    0.035    0.035   {built-in method pandas._libs.tslib.array_to_datetime}
# 262     0.001    0.000    0.001    0.000   {built-in method builtins.isinstance}
# 2/1     0.000    0.000    0.006    0.006   series.py:342(__init__)
# 2       0.000    0.000    0.000    0.000   {built-in method numpy.array}
# 1       0.000    0.000    0.046    0.046   {built-in method builtins.exec}
# 1       0.000    0.000    0.001    0.001   sre_parse.py:493(_parse)
# 1       0.000    0.000    0.046    0.046   datetimes.py:702(to_datetime)
# 2/1     0.000    0.000    0.000    0.000   sre_compile.py:71(_compile)
# 2/1     0.000    0.000    0.001    0.001   base.py:431(__new__)
# 2       0.000    0.000    0.000    0.000   {pandas._libs.lib.maybe_convert_objects}
```

Using `find_stack_level` in https://github.com/pandas-dev/pandas/blob/6db95e79297a4dcefff53ca151324949b0c956c3/pandas/_libs/tslibs/parsing.pyx#L217 and https://github.com/pandas-dev/pandas/blob/6db95e79297a4dcefff53ca151324949b0c956c3/pandas/_libs/tslibs/parsing.pyx#L225:

```
# 224611 function calls (224599 primitive calls) in 0.582 seconds
#
# Ordered by: internal time
#
# ncalls  tottime  percall  cumtime  percall filename:lineno(function)
# 11060   0.106    0.000    0.342    0.000   inspect.py:655(getfile)
# 82106   0.081    0.000    0.082    0.000   {built-in method builtins.isinstance}
# 2212    0.063    0.000    0.517    0.000   _exceptions.py:30(find_stack_level)
# 1       0.045    0.045    0.572    0.572   {built-in method pandas._libs.tslib.array_to_datetime}
# 22128   0.027    0.000    0.027    0.000   {method 'startswith' of 'str' objects}
# 11060   0.023    0.000    0.035    0.000   inspect.py:64(ismodule)
# 11060   0.023    0.000    0.034    0.000   inspect.py:81(ismethod)
# 11060   0.023    0.000    0.033    0.000   inspect.py:261(iscode)
# 11060   0.023    0.000    0.034    0.000   inspect.py:237(istraceback)
# 11060   0.023    0.000    0.034    0.000   inspect.py:159(isfunction)
```

Using `find_stack_level` in the same places as above, but with caching (as in this branch):

```
# 7916 function calls (7904 primitive calls) in 0.065 seconds
#
# Ordered by: internal time
#
# ncalls  tottime  percall  cumtime  percall filename:lineno(function)
# 1       0.031    0.031    0.056    0.056   {built-in method pandas._libs.tslib.array_to_datetime}
# 2212    0.018    0.000    0.024    0.000   inspect.py:1546(currentframe)
# 2231    0.003    0.000    0.003    0.000   {built-in method builtins.hasattr}
# 2212    0.003    0.000    0.003    0.000   {built-in method sys._getframe}
# 292     0.001    0.000    0.001    0.000   {built-in method builtins.isinstance}
# 1       0.000    0.000    0.065    0.065   {built-in method builtins.exec}
# 2/1     0.000    0.000    0.005    0.005   series.py:342(__init__)
# 1       0.000    0.000    0.001    0.001   sre_parse.py:493(_parse)
# 2       0.000    0.000    0.000    0.000   {built-in method numpy.array}
# 49/41   0.000    0.000    0.000    0.000   {built-in method builtins.len}
```
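For reference, a condensed sketch of the cached helper and one call site, assembled from the hunks in this diff (only the `pandas/tests` special case and docstring are abbreviated):

```python
import functools
import inspect
import os
import warnings


@functools.lru_cache
def find_stack_level(frame) -> int:
    """
    Find the first place in the stack that is not inside pandas
    (tests notwithstanding).

    ``frame`` should be passed as ``inspect.currentframe()`` by the
    calling function, so the result can be cached per call site.
    """
    import pandas as pd

    pkg_dir = os.path.dirname(pd.__file__)
    test_dir = os.path.join(pkg_dir, "tests")

    # start at 1: the caller's frame is already one level above this one
    n = 1
    while frame:
        fname = inspect.getfile(frame)
        if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
            frame = frame.f_back
            n += 1
        else:
            break
    return n


# each warning site now passes its own frame explicitly:
warnings.warn(
    "this method is deprecated",
    FutureWarning,
    stacklevel=find_stack_level(inspect.currentframe()),
)
```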
https://api.github.com/repos/pandas-dev/pandas/pulls/48023
2022-08-10T13:20:49Z
2022-08-11T16:53:53Z
2022-08-11T16:53:53Z
2022-10-14T08:00:43Z
ENH: change get_dummies default dtype to bool
diff --git a/doc/source/whatsnew/v1.6.0.rst b/doc/source/whatsnew/v1.6.0.rst index bdaea89776b7c..0cad6f3caaf91 100644 --- a/doc/source/whatsnew/v1.6.0.rst +++ b/doc/source/whatsnew/v1.6.0.rst @@ -118,6 +118,7 @@ Other API changes ^^^^^^^^^^^^^^^^^ - Passing ``nanoseconds`` greater than 999 or less than 0 in :class:`Timestamp` now raises a ``ValueError`` (:issue:`48538`, :issue:`48255`) - :func:`read_csv`: specifying an incorrect number of columns with ``index_col`` of now raises ``ParserError`` instead of ``IndexError`` when using the c parser. +- Default value of ``dtype`` in :func:`get_dummies` is changed to ``bool`` from ``uint8`` (:issue:`45848`) - :meth:`DataFrame.astype`, :meth:`Series.astype`, and :meth:`DatetimeIndex.astype` casting datetime64 data to any of "datetime64[s]", "datetime64[ms]", "datetime64[us]" will return an object with the given resolution instead of coercing back to "datetime64[ns]" (:issue:`48928`) - :meth:`DataFrame.astype`, :meth:`Series.astype`, and :meth:`DatetimeIndex.astype` casting timedelta64 data to any of "timedelta64[s]", "timedelta64[ms]", "timedelta64[us]" will return an object with the given resolution instead of coercing to "float64" dtype (:issue:`48963`) - diff --git a/pandas/core/reshape/encoding.py b/pandas/core/reshape/encoding.py index 6670633fcc587..a39e3c1f10956 100644 --- a/pandas/core/reshape/encoding.py +++ b/pandas/core/reshape/encoding.py @@ -66,7 +66,7 @@ def get_dummies( drop_first : bool, default False Whether to get k-1 dummies out of k categorical levels by removing the first level. - dtype : dtype, default np.uint8 + dtype : dtype, default bool Data type for new columns. Only a single dtype is allowed. Returns @@ -89,50 +89,50 @@ def get_dummies( >>> s = pd.Series(list('abca')) >>> pd.get_dummies(s) - a b c - 0 1 0 0 - 1 0 1 0 - 2 0 0 1 - 3 1 0 0 + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False >>> s1 = ['a', 'b', np.nan] >>> pd.get_dummies(s1) - a b - 0 1 0 - 1 0 1 - 2 0 0 + a b + 0 True False + 1 False True + 2 False False >>> pd.get_dummies(s1, dummy_na=True) - a b NaN - 0 1 0 0 - 1 0 1 0 - 2 0 0 1 + a b NaN + 0 True False False + 1 False True False + 2 False False True >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], ... 
'C': [1, 2, 3]}) >>> pd.get_dummies(df, prefix=['col1', 'col2']) C col1_a col1_b col2_a col2_b col2_c - 0 1 1 0 0 1 0 - 1 2 0 1 1 0 0 - 2 3 1 0 0 0 1 + 0 1 True False False True False + 1 2 False True True False False + 2 3 True False False False True >>> pd.get_dummies(pd.Series(list('abcaa'))) - a b c - 0 1 0 0 - 1 0 1 0 - 2 0 0 1 - 3 1 0 0 - 4 1 0 0 + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False + 4 True False False >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) - b c - 0 0 0 - 1 1 0 - 2 0 1 - 3 0 0 - 4 0 0 + b c + 0 False False + 1 True False + 2 False True + 3 False False + 4 False False >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) a b c @@ -236,7 +236,7 @@ def _get_dummies_1d( codes, levels = factorize_from_iterable(Series(data)) if dtype is None: - dtype = np.dtype(np.uint8) + dtype = np.dtype(bool) # error: Argument 1 to "dtype" has incompatible type "Union[ExtensionDtype, str, # dtype[Any], Type[object]]"; expected "Type[Any]" dtype = np.dtype(dtype) # type: ignore[arg-type] diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py index f5c85bd98d8ad..0c1b206cc39bb 100644 --- a/pandas/tests/frame/indexing/test_getitem.py +++ b/pandas/tests/frame/indexing/test_getitem.py @@ -52,9 +52,7 @@ def test_getitem_list_of_labels_categoricalindex_cols(self): # GH#16115 cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")]) - expected = DataFrame( - [[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats - ) + expected = DataFrame([[1, 0], [0, 1]], dtype="bool", index=[0, 1], columns=cats) dummies = get_dummies(cats) result = dummies[list(dummies.columns)] tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index 6c9a60caaa2be..4345a357a0ba8 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -171,7 +171,7 @@ def test_get_dummies_unicode(self, sparse): s = [e, eacute, eacute] res = get_dummies(s, prefix="letter", sparse=sparse) exp = DataFrame( - {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8 + {"letter_e": [True, False, False], f"letter_{eacute}": [False, True, True]} ) if sparse: exp = exp.apply(SparseArray, fill_value=0) @@ -182,15 +182,15 @@ def test_dataframe_dummies_all_obj(self, df, sparse): result = get_dummies(df, sparse=sparse) expected = DataFrame( {"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]}, - dtype=np.uint8, + dtype=bool, ) if sparse: expected = DataFrame( { - "A_a": SparseArray([1, 0, 1], dtype="uint8"), - "A_b": SparseArray([0, 1, 0], dtype="uint8"), - "B_b": SparseArray([1, 1, 0], dtype="uint8"), - "B_c": SparseArray([0, 0, 1], dtype="uint8"), + "A_a": SparseArray([1, 0, 1], dtype="bool"), + "A_b": SparseArray([0, 1, 0], dtype="bool"), + "B_b": SparseArray([1, 1, 0], dtype="bool"), + "B_c": SparseArray([0, 0, 1], dtype="bool"), } ) @@ -208,7 +208,7 @@ def test_dataframe_dummies_string_dtype(self, df): "B_b": [1, 1, 0], "B_c": [0, 0, 1], }, - dtype=np.uint8, + dtype=bool, ) tm.assert_frame_equal(result, expected) @@ -238,12 +238,11 @@ def test_dataframe_dummies_prefix_list(self, df, sparse): expected = DataFrame( { "C": [1, 2, 3], - "from_A_a": [1, 0, 1], - "from_A_b": [0, 1, 0], - "from_B_b": [1, 1, 0], - "from_B_c": [0, 0, 1], + "from_A_a": [True, False, True], + "from_A_b": [False, True, False], + "from_B_b": [True, True, False], + "from_B_c": [False, 
False, True], }, - dtype=np.uint8, ) expected[["C"]] = df[["C"]] cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] @@ -258,9 +257,12 @@ def test_dataframe_dummies_prefix_str(self, df, sparse): result = get_dummies(df, prefix="bad", sparse=sparse) bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"] expected = DataFrame( - [[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]], + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], columns=["C"] + bad_columns, - dtype=np.uint8, ) expected = expected.astype({"C": np.int64}) if sparse: @@ -269,10 +271,10 @@ def test_dataframe_dummies_prefix_str(self, df, sparse): expected = pd.concat( [ Series([1, 2, 3], name="C"), - Series([1, 0, 1], name="bad_a", dtype="Sparse[uint8]"), - Series([0, 1, 0], name="bad_b", dtype="Sparse[uint8]"), - Series([1, 1, 0], name="bad_b", dtype="Sparse[uint8]"), - Series([0, 0, 1], name="bad_c", dtype="Sparse[uint8]"), + Series([True, False, True], name="bad_a", dtype="Sparse[bool]"), + Series([False, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([True, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([False, False, True], name="bad_c", dtype="Sparse[bool]"), ], axis=1, ) @@ -290,11 +292,11 @@ def test_dataframe_dummies_subset(self, df, sparse): }, ) cols = expected.columns - expected[cols[1:]] = expected[cols[1:]].astype(np.uint8) + expected[cols[1:]] = expected[cols[1:]].astype(bool) expected[["C"]] = df[["C"]] if sparse: cols = ["from_A_a", "from_A_b"] - expected[cols] = expected[cols].astype(SparseDtype("uint8", 0)) + expected[cols] = expected[cols].astype(SparseDtype("bool", 0)) tm.assert_frame_equal(result, expected) def test_dataframe_dummies_prefix_sep(self, df, sparse): @@ -302,18 +304,17 @@ def test_dataframe_dummies_prefix_sep(self, df, sparse): expected = DataFrame( { "C": [1, 2, 3], - "A..a": [1, 0, 1], - "A..b": [0, 1, 0], - "B..b": [1, 1, 0], - "B..c": [0, 0, 1], + "A..a": [True, False, True], + "A..b": [False, True, False], + "B..b": [True, True, False], + "B..c": [False, False, True], }, - dtype=np.uint8, ) expected[["C"]] = df[["C"]] expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]] if sparse: cols = ["A..a", "A..b", "B..b", "B..c"] - expected[cols] = expected[cols].astype(SparseDtype("uint8", 0)) + expected[cols] = expected[cols].astype(SparseDtype("bool", 0)) tm.assert_frame_equal(result, expected) @@ -356,9 +357,9 @@ def test_dataframe_dummies_prefix_dict(self, sparse): ) columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] - expected[columns] = expected[columns].astype(np.uint8) + expected[columns] = expected[columns].astype(bool) if sparse: - expected[columns] = expected[columns].astype(SparseDtype("uint8", 0)) + expected[columns] = expected[columns].astype(SparseDtype("bool", 0)) tm.assert_frame_equal(result, expected) @@ -422,19 +423,19 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype): [ ( {"data": DataFrame({"ä": ["a"]})}, - DataFrame({"ä_a": [1]}, dtype=np.uint8), + DataFrame({"ä_a": [True]}), ), ( {"data": DataFrame({"x": ["ä"]})}, - DataFrame({"x_ä": [1]}, dtype=np.uint8), + DataFrame({"x_ä": [True]}), ), ( {"data": DataFrame({"x": ["a"]}), "prefix": "ä"}, - DataFrame({"ä_a": [1]}, dtype=np.uint8), + DataFrame({"ä_a": [True]}), ), ( {"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"}, - DataFrame({"xäa": [1]}, dtype=np.uint8), + DataFrame({"xäa": [True]}), ), ], ) @@ -451,7 +452,7 @@ def test_get_dummies_basic_drop_first(self, sparse): s_series = Series(s_list) 
s_series_index = Series(s_list, list("ABC")) - expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=np.uint8) + expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=bool) result = get_dummies(s_list, drop_first=True, sparse=sparse) if sparse: @@ -487,14 +488,14 @@ def test_get_dummies_basic_drop_first_NA(self, sparse): # Test NA handling together with drop_first s_NA = ["a", "b", np.nan] res = get_dummies(s_NA, drop_first=True, sparse=sparse) - exp = DataFrame({"b": [0, 1, 0]}, dtype=np.uint8) + exp = DataFrame({"b": [0, 1, 0]}, dtype=bool) if sparse: exp = exp.apply(SparseArray, fill_value=0) tm.assert_frame_equal(res, exp) res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse) - exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=np.uint8).reindex( + exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=bool).reindex( ["b", np.nan], axis=1 ) if sparse: @@ -510,7 +511,7 @@ def test_get_dummies_basic_drop_first_NA(self, sparse): def test_dataframe_dummies_drop_first(self, df, sparse): df = df[["A", "B"]] result = get_dummies(df, drop_first=True, sparse=sparse) - expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=np.uint8) + expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=bool) if sparse: expected = expected.apply(SparseArray, fill_value=0) tm.assert_frame_equal(result, expected) @@ -522,7 +523,7 @@ def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype): {"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]} ) cols = ["A_b", "B_c", "cat_y"] - expected[cols] = expected[cols].astype(np.uint8) + expected[cols] = expected[cols].astype(bool) expected = expected[["C", "A_b", "B_c", "cat_y"]] if sparse: for col in cols: @@ -544,7 +545,7 @@ def test_dataframe_dummies_drop_first_with_na(self, df, sparse): } ) cols = ["A_b", "A_nan", "B_c", "B_nan"] - expected[cols] = expected[cols].astype(np.uint8) + expected[cols] = expected[cols].astype(bool) expected = expected.sort_index(axis=1) if sparse: for col in cols: @@ -559,13 +560,13 @@ def test_dataframe_dummies_drop_first_with_na(self, df, sparse): def test_get_dummies_int_int(self): data = Series([1, 2, 1]) result = get_dummies(data) - expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=np.uint8) + expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=bool) tm.assert_frame_equal(result, expected) data = Series(Categorical(["a", "b", "a"])) result = get_dummies(data) expected = DataFrame( - [[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=np.uint8 + [[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=bool ) tm.assert_frame_equal(result, expected) @@ -616,9 +617,12 @@ def test_get_dummies_duplicate_columns(self, df): result = get_dummies(df).sort_index(axis=1) expected = DataFrame( - [[1, 1, 0, 1, 0], [2, 0, 1, 1, 0], [3, 1, 0, 0, 1]], + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], columns=["A", "A_a", "A_b", "A_b", "A_c"], - dtype=np.uint8, ).sort_index(axis=1) expected = expected.astype({"A": np.int64}) @@ -628,7 +632,7 @@ def test_get_dummies_duplicate_columns(self, df): def test_get_dummies_all_sparse(self): df = DataFrame({"A": [1, 2]}) result = get_dummies(df, columns=["A"], sparse=True) - dtype = SparseDtype("uint8", 0) + dtype = SparseDtype("bool", 0) expected = DataFrame( { "A_1": SparseArray([1, 0], dtype=dtype),
Added a future warning when no dtype is passed to `get_dummies`, stating that the default dtype will change to `bool` from `np.uint8`.

- [x] Closes #45848
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
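A minimal before/after illustration of the change, matching the updated doctests above:

```python
import pandas as pd

s = pd.Series(list("abca"))

# new default: dummy columns are boolean
print(pd.get_dummies(s))
#        a      b      c
# 0   True  False  False
# 1  False   True  False
# 2  False  False   True
# 3   True  False  False

# the old behaviour remains available by passing dtype explicitly
print(pd.get_dummies(s, dtype="uint8"))
#    a  b  c
# 0  1  0  0
# 1  0  1  0
# 2  0  0  1
# 3  1  0  0
```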
https://api.github.com/repos/pandas-dev/pandas/pulls/48022
2022-08-10T11:51:16Z
2022-10-11T16:29:10Z
2022-10-11T16:29:10Z
2023-03-01T07:26:05Z
ENH: Support mask in groupby sum
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 502e37705abfb..e2cd7cb472ea5 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1047,6 +1047,7 @@ Groupby/resample/rolling - Bug when using ``engine="numba"`` would return the same jitted function when modifying ``engine_kwargs`` (:issue:`46086`) - Bug in :meth:`.DataFrameGroupBy.transform` fails when ``axis=1`` and ``func`` is ``"first"`` or ``"last"`` (:issue:`45986`) - Bug in :meth:`DataFrameGroupBy.cumsum` with ``skipna=False`` giving incorrect results (:issue:`46216`) +- Bug in :meth:`GroupBy.sum` with integer dtypes losing precision (:issue:`37493`) - Bug in :meth:`.GroupBy.cumsum` with ``timedelta64[ns]`` dtype failing to recognize ``NaT`` as a null value (:issue:`46216`) - Bug in :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with nullable dtypes incorrectly altering the original data in place (:issue:`46220`) - Bug in :meth:`DataFrame.groupby` raising error when ``None`` is in first level of :class:`MultiIndex` (:issue:`47348`) diff --git a/pandas/_libs/groupby.pyi b/pandas/_libs/groupby.pyi index dfae1bff91ac8..3ec37718eb652 100644 --- a/pandas/_libs/groupby.pyi +++ b/pandas/_libs/groupby.pyi @@ -51,10 +51,12 @@ def group_any_all( skipna: bool, ) -> None: ... def group_sum( - out: np.ndarray, # complexfloating_t[:, ::1] + out: np.ndarray, # complexfloatingintuint_t[:, ::1] counts: np.ndarray, # int64_t[::1] - values: np.ndarray, # ndarray[complexfloating_t, ndim=2] + values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2] labels: np.ndarray, # const intp_t[:] + mask: np.ndarray | None, + result_mask: np.ndarray | None = ..., min_count: int = ..., is_datetimelike: bool = ..., ) -> None: ... diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 06830a1d84c6e..e4314edecfa7e 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -513,6 +513,15 @@ ctypedef fused mean_t: ctypedef fused sum_t: mean_t + int8_t + int16_t + int32_t + int64_t + + uint8_t + uint16_t + uint32_t + uint64_t object @@ -523,6 +532,8 @@ def group_sum( int64_t[::1] counts, ndarray[sum_t, ndim=2] values, const intp_t[::1] labels, + const uint8_t[:, :] mask, + uint8_t[:, ::1] result_mask=None, Py_ssize_t min_count=0, bint is_datetimelike=False, ) -> None: @@ -535,6 +546,8 @@ def group_sum( sum_t[:, ::1] sumx, compensation int64_t[:, ::1] nobs Py_ssize_t len_values = len(values), len_labels = len(labels) + bint uses_mask = mask is not None + bint isna_entry if len_values != len_labels: raise ValueError("len(index) != len(labels)") @@ -572,7 +585,8 @@ def group_sum( for i in range(ncounts): for j in range(K): if nobs[i, j] < min_count: - out[i, j] = NAN + out[i, j] = None + else: out[i, j] = sumx[i, j] else: @@ -590,11 +604,18 @@ def group_sum( # With dt64/td64 values, values have been cast to float64 # instead if int64 for group_sum, but the logic # is otherwise the same as in _treat_as_na - if val == val and not ( - sum_t is float64_t - and is_datetimelike - and val == <float64_t>NPY_NAT - ): + if uses_mask: + isna_entry = mask[i, j] + elif (sum_t is float32_t or sum_t is float64_t + or sum_t is complex64_t or sum_t is complex64_t): + # avoid warnings because of equality comparison + isna_entry = not val == val + elif sum_t is int64_t and is_datetimelike and val == NPY_NAT: + isna_entry = True + else: + isna_entry = False + + if not isna_entry: nobs[lab, j] += 1 y = val - compensation[lab, j] t = sumx[lab, j] + y @@ -604,7 +625,23 @@ def group_sum( for i 
in range(ncounts): for j in range(K): if nobs[i, j] < min_count: - out[i, j] = NAN + # if we are integer dtype, not is_datetimelike, and + # not uses_mask, then getting here implies that + # counts[i] < min_count, which means we will + # be cast to float64 and masked at the end + # of WrappedCythonOp._call_cython_op. So we can safely + # set a placeholder value in out[i, j]. + if uses_mask: + result_mask[i, j] = True + elif (sum_t is float32_t or sum_t is float64_t + or sum_t is complex64_t or sum_t is complex64_t): + out[i, j] = NAN + elif sum_t is int64_t: + out[i, j] = NPY_NAT + else: + # placeholder, see above + out[i, j] = 0 + else: out[i, j] = sumx[i, j] diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 5feed98cbc75b..7617ca5074c9c 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -155,6 +155,7 @@ def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: "last", "first", "rank", + "sum", } _cython_arity = {"ohlc": 4} # OHLC @@ -217,7 +218,7 @@ def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: values = ensure_float64(values) elif values.dtype.kind in ["i", "u"]: - if how in ["sum", "var", "prod", "mean", "ohlc"] or ( + if how in ["var", "prod", "mean", "ohlc"] or ( self.kind == "transform" and self.has_dropped_na ): # result may still include NaN, so we have to cast @@ -578,6 +579,8 @@ def _call_cython_op( counts=counts, values=values, labels=comp_ids, + mask=mask, + result_mask=result_mask, min_count=min_count, is_datetimelike=is_datetimelike, ) @@ -613,7 +616,8 @@ def _call_cython_op( # need to have the result set to np.nan, which may require casting, # see GH#40767 if is_integer_dtype(result.dtype) and not is_datetimelike: - cutoff = max(1, min_count) + # Neutral value for sum is 0, so don't fill empty groups with nan + cutoff = max(0 if self.how == "sum" else 1, min_count) empty_groups = counts < cutoff if empty_groups.any(): if result_mask is not None and self.uses_mask(): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index a6ab13270c4dc..a7c5b85e365ae 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2808,3 +2808,24 @@ def test_single_element_list_grouping(): ) with tm.assert_produces_warning(FutureWarning, match=msg): values, _ = next(iter(df.groupby(["a"]))) + + +def test_groupby_sum_avoid_casting_to_float(): + # GH#37493 + val = 922337203685477580 + df = DataFrame({"a": 1, "b": [val]}) + result = df.groupby("a").sum() - val + expected = DataFrame({"b": [0]}, index=Index([1], name="a")) + tm.assert_frame_equal(result, expected) + + +def test_groupby_sum_support_mask(any_numeric_ea_dtype): + # GH#37493 + df = DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype=any_numeric_ea_dtype) + result = df.groupby("a").sum() + expected = DataFrame( + {"b": [3]}, + index=Index([1], name="a", dtype=any_numeric_ea_dtype), + dtype=any_numeric_ea_dtype, + ) + tm.assert_frame_equal(result, expected)
- [x] xref #37493
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

cc @jorisvandenbossche
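The behaviour change is easiest to see via the two new tests above; roughly:

```python
import pandas as pd

# integer sums no longer round-trip through float64, so a value near the
# int64 limit keeps full precision (GH#37493)
val = 922337203685477580
df = pd.DataFrame({"a": 1, "b": [val]})
print(df.groupby("a").sum() - val)
#    b
# a
# 1  0

# masked (nullable) integer dtypes now take the dedicated Cython path too
df2 = pd.DataFrame({"a": 1, "b": [1, 2, pd.NA]}, dtype="Int64")
print(df2.groupby("a").sum())
#    b
# a
# 1  3
```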
https://api.github.com/repos/pandas-dev/pandas/pulls/48018
2022-08-09T21:12:19Z
2022-08-11T17:23:53Z
2022-08-11T17:23:53Z
2022-08-12T18:29:10Z
BUG: iloc raising for ea and null slice and length one list
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 20997bdde5da1..46c375b92dd83 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1725,6 +1725,9 @@ def _unwrap_setitem_indexer(self, indexer): elif com.is_null_slice(indexer[1]): indexer = indexer[0] + elif is_list_like(indexer[1]) and indexer[1][0] == 0: + indexer = indexer[0] + else: raise NotImplementedError( "This should not be reached. Please report a bug at " diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 642bdb127f209..9027ce8109810 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1363,6 +1363,14 @@ def test_iloc_setitem_string_na(self, val): expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string") tm.assert_frame_equal(df, expected) + @pytest.mark.parametrize("func", [list, Series, np.array]) + def test_iloc_setitem_ea_null_slice_length_one_list(self, func): + # GH#48016 + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + df.iloc[:, func([0])] = 5 + expected = DataFrame({"a": [5, 5, 5]}, dtype="Int64") + tm.assert_frame_equal(df, expected) + class TestDataFrameIndexingUInt64: def test_setitem(self, uint64_frame):
This is also a regression on main.
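A minimal reproducer, mirroring the regression test added here (before the fix this fell into the `NotImplementedError` branch visible in the diff):

```python
import pandas as pd

# setting via iloc with a null row slice and a length-one list of column
# positions on an extension-array column (GH#48016)
df = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64")
df.iloc[:, [0]] = 5
print(df)
#    a
# 0  5
# 1  5
# 2  5
```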
https://api.github.com/repos/pandas-dev/pandas/pulls/48016
2022-08-09T17:49:52Z
2022-08-09T19:43:46Z
2022-08-09T19:43:46Z
2022-08-09T21:12:52Z
TYP: avoid inherit_names for DatetimeIndexOpsMixin
diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index c3d550c7a5ba9..34c5c661f6ec9 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -13,6 +13,7 @@ from typing import ( import numpy as np +from pandas._libs.tslibs.nattype import NaTType from pandas._typing import npt from .timedeltas import Timedelta @@ -49,6 +50,8 @@ class BaseOffset: @overload def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ... @overload + def __radd__(self, other: NaTType) -> NaTType: ... + @overload def __radd__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ... @overload def __radd__(self, other: _DatetimeT) -> _DatetimeT: ... diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 9d32bc008eb25..c946fc2ac6e13 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -925,7 +925,7 @@ def _maybe_mask_results( # Frequency Properties/Methods @property - def freq(self): + def freq(self) -> BaseOffset | None: """ Return the frequency object if it is set, otherwise None. """ @@ -1220,7 +1220,9 @@ def _sub_period(self, other: Period) -> npt.NDArray[np.object_]: new_i8_data = checked_add_with_arr( self.asi8, -other.ordinal, arr_mask=self._isnan ) - new_data = np.array([self.freq.base * x for x in new_i8_data]) + new_data = np.array( + [cast("PeriodArray", self).freq.base * x for x in new_i8_data] + ) if self._hasna: new_data[self._isnan] = NaT @@ -1456,8 +1458,9 @@ def __add__(self, other): # as is_integer returns True for these if not is_period_dtype(self.dtype): raise integer_op_not_supported(self) - result = cast("PeriodArray", self)._addsub_int_array_or_scalar( - other * self.freq.n, operator.add + self_periodarray = cast("PeriodArray", self) + result = self_periodarray._addsub_int_array_or_scalar( + other * self_periodarray.freq.n, operator.add ) # array-like others @@ -1473,8 +1476,9 @@ def __add__(self, other): elif is_integer_dtype(other_dtype): if not is_period_dtype(self.dtype): raise integer_op_not_supported(self) + # error: Item "None" of "Optional[BaseOffset]" has no attribute "n" result = cast("PeriodArray", self)._addsub_int_array_or_scalar( - other * self.freq.n, operator.add + other * self.freq.n, operator.add # type: ignore[union-attr] ) else: # Includes Categorical, other ExtensionArrays @@ -1514,8 +1518,9 @@ def __sub__(self, other): # as is_integer returns True for these if not is_period_dtype(self.dtype): raise integer_op_not_supported(self) + # error: Item "None" of "Optional[BaseOffset]" has no attribute "n" result = cast("PeriodArray", self)._addsub_int_array_or_scalar( - other * self.freq.n, operator.sub + other * self.freq.n, operator.sub # type: ignore[union-attr] ) elif isinstance(other, Period): @@ -1537,8 +1542,9 @@ def __sub__(self, other): elif is_integer_dtype(other_dtype): if not is_period_dtype(self.dtype): raise integer_op_not_supported(self) + # error: Item "None" of "Optional[BaseOffset]" has no attribute "n" result = cast("PeriodArray", self)._addsub_int_array_or_scalar( - other * self.freq.n, operator.sub + other * self.freq.n, operator.sub # type: ignore[union-attr] ) else: # Includes ExtensionArrays, float_dtype diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 193ab6dc99350..a76491e04b93c 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -36,6 +36,7 @@ npt, ) from pandas.compat.numpy import function as nv +from pandas.util._decorators 
import doc from pandas.util._validators import validate_endpoints from pandas.core.dtypes.astype import astype_td64_unit_conversion @@ -143,6 +144,23 @@ def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType: return NaT return Timedelta._from_value_and_reso(y, reso=self._reso) + # error: Decorated property not supported + @property # type: ignore[misc] + @doc(dtl.DatetimeLikeArrayMixin.freq) + def freq(self) -> Tick | None: + # error: Incompatible return value type (got "Optional[BaseOffset]", expected + # "Optional[Tick]") + return self._freq # type: ignore[return-value] + + @freq.setter + def freq(self, value) -> None: + # python doesn't support super().freq = value (any mypy has some + # issue with the workaround) + # error: overloaded function has no attribute "fset" + super(TimedeltaArray, TimedeltaArray).freq.fset( # type: ignore[attr-defined] + self, value + ) + @property # error: Return type "dtype" of "dtype" incompatible with return type # "ExtensionDtype" in supertype "ExtensionArray" diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 6867ef936d45e..ab5fc8f06f5ab 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -3,6 +3,10 @@ """ from __future__ import annotations +from abc import ( + ABC, + abstractmethod, +) from datetime import datetime import inspect from typing import ( @@ -30,6 +34,7 @@ parsing, to_offset, ) +from pandas._typing import npt from pandas.compat.numpy import function as nv from pandas.util._decorators import ( Appender, @@ -59,10 +64,7 @@ Index, _index_shared_docs, ) -from pandas.core.indexes.extension import ( - NDArrayBackedExtensionIndex, - inherit_names, -) +from pandas.core.indexes.extension import NDArrayBackedExtensionIndex from pandas.core.indexes.range import RangeIndex from pandas.core.tools.timedeltas import to_timedelta @@ -75,13 +77,7 @@ _TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin") -@inherit_names( - ["inferred_freq", "_resolution_obj", "resolution"], - DatetimeLikeArrayMixin, - cache=True, -) -@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin) -class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex): +class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC): """ Common ops mixin to support a unified interface datetimelike Index. """ @@ -89,9 +85,41 @@ class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex): _is_numeric_dtype = False _can_hold_strings = False _data: DatetimeArray | TimedeltaArray | PeriodArray - freq: BaseOffset | None - freqstr: str | None - _resolution_obj: Resolution + + # ------------------------------------------------------------------------ + + @doc(DatetimeLikeArrayMixin.mean) + def mean(self, *, skipna: bool = True, axis: int | None = 0): + return self._data.mean(skipna=skipna, axis=axis) + + # error: Decorated property not supported + @property # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.asi8) + def asi8(self) -> npt.NDArray[np.int64]: + return self._data.asi8 + + # error: Decorated property not supported + @property # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.freq) + def freq(self) -> BaseOffset | None: + return self._data.freq + + # error: Decorated property not supported + @property # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.freqstr) + def freqstr(self) -> str | None: + return self._data.freqstr + + @cache_readonly + @abstractmethod + def _resolution_obj(self) -> Resolution: + ... 
+ + # error: Decorated property not supported + @cache_readonly # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.resolution) + def resolution(self) -> str: + return self._data.resolution # ------------------------------------------------------------------------ @@ -373,7 +401,7 @@ def _maybe_cast_listlike_indexer(self, keyarr): return Index(res, dtype=res.dtype) -class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin): +class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC): """ Mixin class for methods shared by DatetimeIndex and TimedeltaIndex, but not PeriodIndex @@ -408,6 +436,23 @@ def values(self) -> np.ndarray: # NB: For Datetime64TZ this is lossy return self._data._ndarray + # error: Decorated property not supported + @property # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.freq) + def freq(self) -> BaseOffset | None: + # needed to define the setter (same as in DatetimeIndexOpsMixin) + return self._data.freq + + @freq.setter + def freq(self, value) -> None: + self._data.freq = value + + # error: Decorated property not supported + @cache_readonly # type: ignore[misc] + @doc(DatetimeLikeArrayMixin.inferred_freq) + def inferred_freq(self) -> str | None: + return self._data.inferred_freq + # -------------------------------------------------------------------- # Set Operation Methods diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 80138c25b0c27..d6c5d81a8fae1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -121,7 +121,7 @@ def _new_DatetimeIndex(cls, d): DatetimeArray, wrap=True, ) -@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True) +@inherit_names(["is_normalized"], DatetimeArray, cache=True) @inherit_names( [ "tz", @@ -261,7 +261,6 @@ def _engine_type(self) -> type[libindex.DatetimeEngine]: return libindex.DatetimeEngine _data: DatetimeArray - inferred_freq: str | None tz: tzinfo | None # -------------------------------------------------------------------- @@ -308,6 +307,10 @@ def isocalendar(self) -> DataFrame: df = self._data.isocalendar() return df.set_index(self) + @cache_readonly + def _resolution_obj(self) -> Resolution: + return self._data._resolution_obj + # -------------------------------------------------------------------- # Constructors diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index fedcba7aa9644..e2741aa185051 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -167,8 +167,7 @@ def _engine_type(self) -> type[libindex.PeriodEngine]: return libindex.PeriodEngine @cache_readonly - # Signature of "_resolution_obj" incompatible with supertype "DatetimeIndexOpsMixin" - def _resolution_obj(self) -> Resolution: # type: ignore[override] + def _resolution_obj(self) -> Resolution: # for compat with DatetimeIndex return self.dtype._resolution_obj @@ -393,7 +392,7 @@ def is_full(self) -> bool: if not self.is_monotonic_increasing: raise ValueError("Index is not monotonic") values = self.asi8 - return ((values[1:] - values[:-1]) < 2).all() + return ((values[1:] - values[:-1]) < 2).all().item() @property def inferred_type(self) -> str: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 12a8f2c0d5a9d..c7bcdac9c6739 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -112,6 +112,13 @@ def _engine_type(self) -> type[libindex.TimedeltaEngine]: # Use base class method instead of DatetimeTimedeltaMixin._get_string_slice _get_string_slice = 
Index._get_string_slice + # error: Return type "None" of "_resolution_obj" incompatible with return type + # "Resolution" in supertype "DatetimeIndexOpsMixin" + @property + def _resolution_obj(self) -> None: # type: ignore[override] + # not used but need to implement it because it is an abstract method + return None + # ------------------------------------------------------------------- # Constructors diff --git a/pandas/tests/indexes/period/test_freq_attr.py b/pandas/tests/indexes/period/test_freq_attr.py index e1ecffa4982bd..71115929121bc 100644 --- a/pandas/tests/indexes/period/test_freq_attr.py +++ b/pandas/tests/indexes/period/test_freq_attr.py @@ -20,7 +20,7 @@ def test_freq_setter_deprecated(self): # warning for setter msg = ( - "property 'freq' of 'PeriodArray' object has no setter" + "property 'freq' of 'PeriodIndex' object has no setter" if PY311 else "can't set attribute" )
Similar to #36742; xref #32100.
https://api.github.com/repos/pandas-dev/pandas/pulls/48015
2022-08-09T16:41:42Z
2022-11-17T01:31:52Z
null
2022-11-21T02:17:30Z
DOC: Update whatsnew 1.5
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 502e37705abfb..e5ef9b16b62eb 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -932,8 +932,7 @@ Indexing - Bug in setting a NA value (``None`` or ``np.nan``) into a :class:`Series` with int-based :class:`IntervalDtype` incorrectly casting to object dtype instead of a float-based :class:`IntervalDtype` (:issue:`45568`) - Bug in indexing setting values into an ``ExtensionDtype`` column with ``df.iloc[:, i] = values`` with ``values`` having the same dtype as ``df.iloc[:, i]`` incorrectly inserting a new array instead of setting in-place (:issue:`33457`) - Bug in :meth:`Series.__setitem__` with a non-integer :class:`Index` when using an integer key to set a value that cannot be set inplace where a ``ValueError`` was raised instead of casting to a common dtype (:issue:`45070`) -- Bug in :meth:`DataFrame.loc` raising ``NotImplementedError`` when setting value into one column :class:`DataFrame` with all null slice as column indexer (:issue:`45469`) -- Bug in :meth:`DataFrame.loc` not casting ``None`` to ``NA`` when setting value a list into :class:`DataFrame` (:issue:`47987`) +- Bug in :meth:`DataFrame.loc` not casting ``None`` to ``NA`` when setting value as a list into :class:`DataFrame` (:issue:`47987`) - Bug in :meth:`Series.__setitem__` when setting incompatible values into a ``PeriodDtype`` or ``IntervalDtype`` :class:`Series` raising when indexing with a boolean mask but coercing when indexing with otherwise-equivalent indexers; these now consistently coerce, along with :meth:`Series.mask` and :meth:`Series.where` (:issue:`45768`) - Bug in :meth:`DataFrame.where` with multiple columns with datetime-like dtypes failing to downcast results consistent with other dtypes (:issue:`45837`) - Bug in :func:`isin` upcasting to ``float64`` with unsigned integer dtype and list-like argument without a dtype (:issue:`46485`)
Accidentally stumbled across the fact that this was a regression on main, not in our releases. Hence, removed the whatsnew note.
https://api.github.com/repos/pandas-dev/pandas/pulls/48013
2022-08-09T06:40:56Z
2022-08-09T18:34:15Z
2022-08-09T18:34:15Z
2022-08-09T18:55:11Z
DOC: Add examples to frequency classes and methods
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e7c3a709bb251..d553c0d77bc56 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -367,7 +367,23 @@ cdef class BaseOffset: def __init__(self, n=1, normalize=False): n = self._validate_n(n) self.n = n + """ + Number of multiples of the frequency. + + Examples + -------- + >>> pd.offsets.Hour(5).n + 5 + """ self.normalize = normalize + """ + Return boolean whether the frequency can align with midnight. + + Examples + -------- + >>> pd.offsets.Hour(5).normalize + False + """ self._cache = {} def __eq__(self, other) -> bool: @@ -417,6 +433,20 @@ cdef class BaseOffset: @property def kwds(self) -> dict: + """ + Return a dict of extra parameters for the offset. + + Examples + -------- + >>> pd.DateOffset(5).kwds + {} + + >>> pd.offsets.FY5253Quarter().kwds + {'weekday': 0, + 'startingMonth': 1, + 'qtr_with_extra_week': 1, + 'variation': 'nearest'} + """ # for backwards-compatibility kwds = {name: getattr(self, name, None) for name in self._attributes if name not in ["n", "normalize"]} @@ -506,6 +536,16 @@ cdef class BaseOffset: def copy(self): # Note: we are deferring directly to __mul__ instead of __rmul__, as # that allows us to use methods that can go in a `cdef class` + """ + Return a copy of the frequency. + + Examples + -------- + >>> freq = pd.DateOffset(1) + >>> freq_copy = freq.copy() + >>> freq is freq_copy + False + """ return self * 1 # ------------------------------------------------------------------ @@ -547,6 +587,17 @@ cdef class BaseOffset: @property def name(self) -> str: + """ + Return a string representing the base frequency. + + Examples + -------- + >>> pd.offsets.Hour().name + 'H' + + >>> pd.offsets.Hour(5).name + 'H' + """ return self.rule_code @property @@ -559,6 +610,23 @@ cdef class BaseOffset: @cache_readonly def freqstr(self) -> str: + """ + Return a string representing the frequency. + + Examples + -------- + >>> pd.DateOffset(5).freqstr + '<5 * DateOffsets>' + + >>> pd.offsets.BusinessHour(2).freqstr + '2BH' + + >>> pd.offsets.Nano().freqstr + 'N' + + >>> pd.offsets.Nano(-3).freqstr + '-3N' + """ try: code = self.rule_code except NotImplementedError: @@ -655,6 +723,28 @@ cdef class BaseOffset: return get_day_of_month(&dts, self._day_opt) def is_on_offset(self, dt: datetime) -> bool: + """ + Return boolean whether a timestamp intersects with this frequency. + + Parameters + ---------- + dt : datetime.datetime + Timestamp to check intersections with frequency. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Day(1) + >>> freq.is_on_offset(ts) + True + + >>> ts = pd.Timestamp(2022, 8, 6) + >>> ts.day_name() + 'Saturday' + >>> freq = pd.offsets.BusinessDay(1) + >>> freq.is_on_offset(ts) + False + """ if self.normalize and not _is_normalized(dt): return False @@ -745,26 +835,96 @@ cdef class BaseOffset: def is_anchored(self) -> bool: # TODO: Does this make sense for the general case? It would help # if there were a canonical docstring for what is_anchored means. + """ + Return boolean whether the frequency is a unit frequency (n=1). + + Examples + -------- + >>> pd.DateOffset().is_anchored() + True + >>> pd.DateOffset(2).is_anchored() + False + """ return self.n == 1 # ------------------------------------------------------------------ def is_month_start(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the month start. 
+ + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_month_start(ts) + True + """ return ts._get_start_end_field("is_month_start", self) def is_month_end(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the month end. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_month_end(ts) + False + """ return ts._get_start_end_field("is_month_end", self) def is_quarter_start(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the quarter start. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_quarter_start(ts) + True + """ return ts._get_start_end_field("is_quarter_start", self) def is_quarter_end(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the quarter end. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_quarter_end(ts) + False + """ return ts._get_start_end_field("is_quarter_end", self) def is_year_start(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the year start. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_year_start(ts) + True + """ return ts._get_start_end_field("is_year_start", self) def is_year_end(self, _Timestamp ts): + """ + Return boolean whether a timestamp occurs on the year end. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> freq = pd.offsets.Hour(5) + >>> freq.is_year_end(ts) + False + """ return ts._get_start_end_field("is_year_end", self) @@ -837,6 +997,19 @@ cdef class Tick(SingleConstructorOffset): @property def nanos(self) -> int64_t: + """ + Return an integer of the total number of nanoseconds. + + Raises + ------ + ValueError + If the frequency is non-fixed. + + Examples + -------- + >>> pd.offsets.Hour(5).nanos + 18000000000000 + """ return self.n * self._nanos_inc def is_on_offset(self, dt: datetime) -> bool: @@ -1384,6 +1557,12 @@ cdef class BusinessMixin(SingleConstructorOffset): cdef class BusinessDay(BusinessMixin): """ DateOffset subclass representing possibly n business days. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 8, 5) + >>> ts + pd.offsets.BusinessDay() + Timestamp('2022-08-08 00:00:00') """ _period_dtype_code = PeriodDtypeCode.B _prefix = "B" @@ -1504,6 +1683,12 @@ cdef class BusinessHour(BusinessMixin): Start time of your custom business hour in 24h format. end : str, default: "17:00" End time of your custom business hour in 24h format. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 8, 5, 16) + >>> ts + pd.offsets.BusinessHour() + Timestamp('2022-08-08 09:00:00') """ _prefix = "BH" @@ -2037,6 +2222,12 @@ cdef class BYearBegin(YearOffset): cdef class YearEnd(YearOffset): """ DateOffset increments between calendar year ends. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.YearEnd() + Timestamp('2022-12-31 00:00:00') """ _default_month = 12 @@ -2056,6 +2247,12 @@ cdef class YearEnd(YearOffset): cdef class YearBegin(YearOffset): """ DateOffset increments between calendar year begin dates. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.YearBegin() + Timestamp('2023-01-01 00:00:00') """ _default_month = 1 @@ -2201,6 +2398,12 @@ cdef class QuarterEnd(QuarterOffset): startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ... 
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ... startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ... + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.QuarterEnd() + Timestamp('2022-03-31 00:00:00') """ _default_starting_month = 3 _prefix = "Q" @@ -2223,6 +2426,12 @@ cdef class QuarterBegin(QuarterOffset): startingMonth = 1 corresponds to dates like 1/01/2007, 4/01/2007, ... startingMonth = 2 corresponds to dates like 2/01/2007, 5/01/2007, ... startingMonth = 3 corresponds to dates like 3/01/2007, 6/01/2007, ... + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.QuarterBegin() + Timestamp('2022-03-01 00:00:00') """ _default_starting_month = 3 _from_name_starting_month = 1 @@ -2263,6 +2472,12 @@ cdef class MonthOffset(SingleConstructorOffset): cdef class MonthEnd(MonthOffset): """ DateOffset of one month end. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.MonthEnd() + Timestamp('2022-01-31 00:00:00') """ _period_dtype_code = PeriodDtypeCode.M _prefix = "M" @@ -2272,6 +2487,12 @@ cdef class MonthEnd(MonthOffset): cdef class MonthBegin(MonthOffset): """ DateOffset of one month at beginning. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.MonthBegin() + Timestamp('2022-02-01 00:00:00') """ _prefix = "MS" _day_opt = "start" @@ -2458,6 +2679,12 @@ cdef class SemiMonthEnd(SemiMonthOffset): n : int normalize : bool, default False day_of_month : int, {1, 3,...,27}, default 15 + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.SemiMonthEnd() + Timestamp('2022-01-15 00:00:00') """ _prefix = "SM" @@ -2479,6 +2706,12 @@ cdef class SemiMonthBegin(SemiMonthOffset): n : int normalize : bool, default False day_of_month : int, {2, 3,...,27}, default 15 + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.SemiMonthBegin() + Timestamp('2022-01-15 00:00:00') """ _prefix = "SMS" @@ -2501,6 +2734,12 @@ cdef class Week(SingleConstructorOffset): ---------- weekday : int or None, default None Always generate specific day of week. 0 for Monday. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.Week() + Timestamp('2022-01-08 00:00:00') """ _inc = timedelta(weeks=1) @@ -2657,6 +2896,12 @@ cdef class WeekOfMonth(WeekOfMonthMixin): - 4 is Friday - 5 is Saturday - 6 is Sunday. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.WeekOfMonth() + Timestamp('2022-01-03 00:00:00') """ _prefix = "WOM" @@ -2722,6 +2967,12 @@ cdef class LastWeekOfMonth(WeekOfMonthMixin): - 4 is Friday - 5 is Saturday - 6 is Sunday. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.LastWeekOfMonth() + Timestamp('2022-01-31 00:00:00') """ _prefix = "LWOM" @@ -2869,6 +3120,12 @@ cdef class FY5253(FY5253Mixin): - "nearest" means year end is **weekday** closest to last day of month in year. - "last" means year end is final **weekday** of the final month in fiscal year. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.FY5253() + Timestamp('2022-01-31 00:00:00') """ _prefix = "RE" @@ -3046,6 +3303,12 @@ cdef class FY5253Quarter(FY5253Mixin): - "nearest" means year end is **weekday** closest to last day of month in year. - "last" means year end is final **weekday** of the final month in fiscal year. 
+ + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.FY5253Quarter() + Timestamp('2022-01-31 00:00:00') """ _prefix = "REQ" @@ -3221,6 +3484,12 @@ cdef class Easter(SingleConstructorOffset): DateOffset for the Easter holiday using logic defined in dateutil. Right now uses the revised method which is valid in years 1583-4099. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 1, 1) + >>> ts + pd.offsets.Easter() + Timestamp('2022-04-17 00:00:00') """ cpdef __setstate__(self, state): @@ -3282,6 +3551,12 @@ cdef class CustomBusinessDay(BusinessDay): passed to ``numpy.busdaycalendar``. calendar : np.busdaycalendar offset : timedelta, default timedelta(0) + + Examples + -------- + >>> ts = pd.Timestamp(2022, 8, 5) + >>> ts + pd.offsets.CustomBusinessDay(1) + Timestamp('2022-08-08 00:00:00') """ _prefix = "C" @@ -3362,6 +3637,12 @@ cdef class CustomBusinessHour(BusinessHour): Start time of your custom business hour in 24h format. end : str, default: "17:00" End time of your custom business hour in 24h format. + + Examples + -------- + >>> ts = pd.Timestamp(2022, 8, 5, 16) + >>> ts + pd.offsets.CustomBusinessHour() + Timestamp('2022-08-08 09:00:00') """ _prefix = "CBH"
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). The goal is to eventually enable the `EX01` docstring check (all docstrings have examples), so this adds simple examples to the public frequency classes and methods.
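For reference, the added doctests follow this pattern (values taken straight from the examples in the diff):

```python
import pandas as pd

ts = pd.Timestamp(2022, 1, 1)
freq = pd.offsets.Hour(5)

# New examples on the start/end predicates:
print(freq.is_month_start(ts))     # True
print(freq.is_month_end(ts))       # False

# New examples on the offset classes themselves:
print(ts + pd.offsets.MonthEnd())  # Timestamp('2022-01-31 00:00:00')
print(pd.offsets.Hour(5).nanos)    # 18000000000000
```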
https://api.github.com/repos/pandas-dev/pandas/pulls/48011
2022-08-09T04:24:03Z
2022-08-10T21:00:37Z
2022-08-10T21:00:37Z
2022-08-10T21:00:40Z
TST: Refactor index setops test for xdist stability
diff --git a/pandas/conftest.py b/pandas/conftest.py index e176707d8a8f1..c3c1867d2dad1 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -665,21 +665,6 @@ def index_flat(request): index_flat2 = index_flat [email protected]( - params=[ - key - for key in indices_dict - if not isinstance(indices_dict[key], MultiIndex) and indices_dict[key].is_unique - ] -) -def index_flat_unique(request): - """ - index_flat with uniqueness requirement. - """ - key = request.param - return indices_dict[key].copy() - - @pytest.fixture( params=[ key diff --git a/pandas/tests/indexes/test_setops.py b/pandas/tests/indexes/test_setops.py index 45ecd09e550d0..d2f6a1736ff7c 100644 --- a/pandas/tests/indexes/test_setops.py +++ b/pandas/tests/indexes/test_setops.py @@ -317,11 +317,13 @@ def test_symmetric_difference(self, index): (None, None, None), ], ) - def test_corner_union(self, index_flat_unique, fname, sname, expected_name): + def test_corner_union(self, index_flat, fname, sname, expected_name): # GH#9943, GH#9862 # Test unions with various name combinations # Do not test MultiIndex or repeats - index = index_flat_unique + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat # Test copy.union(copy) first = index.copy().set_names(fname) @@ -361,8 +363,10 @@ def test_corner_union(self, index_flat_unique, fname, sname, expected_name): (None, None, None), ], ) - def test_union_unequal(self, index_flat_unique, fname, sname, expected_name): - index = index_flat_unique + def test_union_unequal(self, index_flat, fname, sname, expected_name): + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat # test copy.union(subset) - need sort for unicode and string first = index.copy().set_names(fname) @@ -381,10 +385,12 @@ def test_union_unequal(self, index_flat_unique, fname, sname, expected_name): (None, None, None), ], ) - def test_corner_intersect(self, index_flat_unique, fname, sname, expected_name): + def test_corner_intersect(self, index_flat, fname, sname, expected_name): # GH#35847 # Test intersections with various name combinations - index = index_flat_unique + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat # Test copy.intersection(copy) first = index.copy().set_names(fname) @@ -424,8 +430,10 @@ def test_corner_intersect(self, index_flat_unique, fname, sname, expected_name): (None, None, None), ], ) - def test_intersect_unequal(self, index_flat_unique, fname, sname, expected_name): - index = index_flat_unique + def test_intersect_unequal(self, index_flat, fname, sname, expected_name): + if not index_flat.is_unique: + pytest.skip("Randomly generated index_flat was not unique.") + index = index_flat # test copy.intersection(subset) - need sort for unicode and string first = index.copy().set_names(fname)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). It appears `pytest-xdist` requires every worker to collect exactly the same set of parameterized tests. Since the fixture data is generated randomly, the `index_flat_unique` fixture could yield a different number of indexes on different workers depending on uniqueness, causing the whole build to fail. It's only used in 4 tests, so this moves the filtering logic into the relevant tests. Example build failure: https://github.com/pandas-dev/pandas/runs/7735103409?check_suite_focus=true
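A minimal, self-contained sketch of the pattern (the fixture and index values here are stand-ins for the real `index_flat`): the skip happens at run time inside the test, so every worker collects an identical parameterized set no matter what data was generated:

```python
import pandas as pd
import pytest


@pytest.fixture(params=["unique", "repeated"])
def index_flat(request):
    # The param list is fixed, so collection is deterministic across
    # pytest-xdist workers even though the returned data varies.
    if request.param == "unique":
        return pd.Index([1, 2, 3])
    return pd.Index([1, 1, 2])


def test_corner_union(index_flat):
    # Filter at run time instead of at fixture-parameterization time.
    if not index_flat.is_unique:
        pytest.skip("Randomly generated index_flat was not unique.")
    assert index_flat.union(index_flat).is_unique
```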
https://api.github.com/repos/pandas-dev/pandas/pulls/48010
2022-08-09T04:20:06Z
2022-08-09T16:46:06Z
2022-08-09T16:46:06Z
2022-08-09T16:46:09Z
Backport PR #47979 on branch 1.4.x (DEPS: Update cython)
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 46640505a4c84..b1ea2682b7ea7 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -42,7 +42,7 @@ // followed by the pip installed packages). "matrix": { "numpy": [], - "Cython": ["0.29.30"], + "Cython": ["0.29.32"], "matplotlib": [], "sqlalchemy": [], "scipy": [], diff --git a/ci/deps/actions-310-numpydev.yaml b/ci/deps/actions-310-numpydev.yaml index 2ac97cc479196..90f3483d03dff 100644 --- a/ci/deps/actions-310-numpydev.yaml +++ b/ci/deps/actions-310-numpydev.yaml @@ -16,7 +16,7 @@ dependencies: - pytz - pip - pip: - - cython==0.29.30 # GH#34014 + - cython==0.29.32 - "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple" - "--pre" - "numpy" diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 7a879b5ac9648..df1dfdd38542c 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.10 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/ci/deps/actions-38-downstream_compat.yaml b/ci/deps/actions-38-downstream_compat.yaml index cd501b1e018ef..89a54a857a0a8 100644 --- a/ci/deps/actions-38-downstream_compat.yaml +++ b/ci/deps/actions-38-downstream_compat.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/ci/deps/actions-38-minimum_versions.yaml b/ci/deps/actions-38-minimum_versions.yaml index f6c3f3855aa02..ab0e46feecd33 100644 --- a/ci/deps/actions-38-minimum_versions.yaml +++ b/ci/deps/actions-38-minimum_versions.yaml @@ -7,7 +7,7 @@ dependencies: - python=3.8.0 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/ci/deps/actions-38.yaml b/ci/deps/actions-38.yaml index 4d2ffbfbd9ce5..4feff3100d78f 100644 --- a/ci/deps/actions-38.yaml +++ b/ci/deps/actions-38.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.8 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 202db035a6c52..0ba1dc7f4deb1 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.9 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/ci/deps/actions-pypy-38.yaml b/ci/deps/actions-pypy-38.yaml index 1a3c73cb4ae2f..e06b992acc191 100644 --- a/ci/deps/actions-pypy-38.yaml +++ b/ci/deps/actions-pypy-38.yaml @@ -8,7 +8,7 @@ dependencies: - python=3.8[build=*_pypy] # TODO: use this once pypy3.8 is available # tools - - cython>=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-asyncio diff --git a/ci/deps/circle-38-arm64.yaml b/ci/deps/circle-38-arm64.yaml index 5d3f79602ed37..221887e58d071 100644 --- a/ci/deps/circle-38-arm64.yaml +++ b/ci/deps/circle-38-arm64.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.8 # test dependencies - - cython=0.29.30 + - cython>=0.29.32 - pytest>=6.0 - pytest-cov - pytest-xdist>=1.31 diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index b4224b5b210e0..57b8fdee5888a 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -14,6 +14,7 @@ including other versions of pandas. 
Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in taking NULL :class:`objects` from a :class:`DataFrame` causing a segmentation violation. These NULL values are created by :meth:`numpy.empty_like` (:issue:`46848`) - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) - Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`) - Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) @@ -36,7 +37,7 @@ Bug fixes Other ~~~~~ -- +- The minimum version of Cython needed to compile pandas is now ``0.29.32`` (:issue:`47978`) - .. --------------------------------------------------------------------------- diff --git a/environment.yml b/environment.yml index a20ccc797eb79..90057bd5d62b5 100644 --- a/environment.yml +++ b/environment.yml @@ -15,7 +15,7 @@ dependencies: # The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms. - c-compiler - cxx-compiler - - cython>=0.29.30 + - cython=0.29.32 # code checks - black=22.3.0 diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 4dc9cbdb7b34d..acb825c53155f 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1227,6 +1227,13 @@ def test_iloc_setitem_nullable_2d_values(self): df.iloc[:] = df.iloc[:, :] tm.assert_frame_equal(df, orig) + def test_getitem_segfault_with_empty_like_object(self): + # GH#46848 + df = DataFrame(np.empty((1, 1), dtype=object)) + df[0] = np.empty_like(df[0]) + # this produces the segfault + df[[0]] + @pytest.mark.parametrize( "null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")] ) diff --git a/pyproject.toml b/pyproject.toml index 318a7398e1035..93dd13476e936 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "setuptools>=51.0.0", "wheel", - "Cython>=0.29.24,<3", # Note: sync with setup.py, environment.yml and asv.conf.json + "Cython>=0.29.32,<3", # Note: sync with setup.py, environment.yml and asv.conf.json "oldest-supported-numpy>=0.10" ] # uncomment to enable pep517 after versioneer problem is fixed. diff --git a/requirements-dev.txt b/requirements-dev.txt index aba5a3b5a23d5..e2b4c73f4a843 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.18.5, <=1.22.4 python-dateutil>=2.8.1 pytz asv -cython>=0.29.30 +cython==0.29.32 black==22.3.0 cpplint flake8==4.0.1 diff --git a/setup.py b/setup.py index ec3fa3c0433f4..5c3531c7d969c 100755 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ def is_platform_mac(): # note: sync with pyproject.toml, environment.yml and asv.conf.json -min_cython_ver = "0.29.30" +min_cython_ver = "0.29.32" try: from Cython import (
Backport PR #47979
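The backport also carries a regression test for the `np.empty_like` segfault fixed by the Cython bump; the reproducer from the diff boils down to:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.empty((1, 1), dtype=object))
df[0] = np.empty_like(df[0])  # fills the column with NULL object pointers
df[[0]]                       # previously produced a segfault (GH#46848)
```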
https://api.github.com/repos/pandas-dev/pandas/pulls/48009
2022-08-08T19:13:16Z
2022-08-08T21:28:08Z
2022-08-08T21:28:08Z
2023-02-22T11:40:50Z
BUG: Fixed ignoring of nanoseconds when adding to series #47856
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index 0b450fab53137..aece8caef65f1 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -824,6 +824,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.resolution` incorrectly returning "day" instead of "nanosecond" for nanosecond-resolution indexes (:issue:`46903`) - Bug in :class:`Timestamp` with an integer or float value and ``unit="Y"`` or ``unit="M"`` giving slightly-wrong results (:issue:`47266`) - Bug in :class:`.DatetimeArray` construction when passed another :class:`.DatetimeArray` and ``freq=None`` incorrectly inferring the freq from the given array (:issue:`47296`) +- Bug when adding a :class:`DateOffset` to a :class:`Series` would not add the ``nanoseconds`` field (:issue:`47856`) - Timedelta diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 81b59db6f0e18..2b6ae112670f1 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -297,8 +297,8 @@ _relativedelta_kwds = {"years", "months", "weeks", "days", "year", "month", cdef _determine_offset(kwds): # timedelta is used for sub-daily plural offsets and all singular - # offsets relativedelta is used for plural offsets of daily length or - # more nanosecond(s) are handled by apply_wraps + # offsets, relativedelta is used for plural offsets of daily length or + # more, nanosecond(s) are handled by apply_wraps kwds_no_nanos = dict( (k, v) for k, v in kwds.items() if k not in ('nanosecond', 'nanoseconds') @@ -1157,7 +1157,12 @@ cdef class RelativeDeltaOffset(BaseOffset): return dt64other elif not self._use_relativedelta and hasattr(self, "_offset"): # timedelta - delta = Timedelta(self._offset * self.n) + num_nano = getattr(self, "nanoseconds", 0) + if num_nano != 0: + rem_nano = Timedelta(nanoseconds=num_nano) + delta = Timedelta((self._offset + rem_nano) * self.n) + else: + delta = Timedelta(self._offset * self.n) td = (<_Timedelta>delta)._as_reso(reso) return dt64other + td else: diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 49661fe1ec8ce..bca4ba98f37b7 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -33,6 +33,7 @@ from pandas import ( DatetimeIndex, + Series, date_range, ) import pandas._testing as tm @@ -987,7 +988,7 @@ def test_dateoffset_add_sub(offset_kwargs, expected_arg): assert result == expected -def test_dataoffset_add_sub_timestamp_with_nano(): +def test_dateoffset_add_sub_timestamp_with_nano(): offset = DateOffset(minutes=2, nanoseconds=9) ts = Timestamp(4) result = ts + offset @@ -1032,3 +1033,26 @@ def test_construct_int_arg_no_kwargs_assumed_days(n): result = Timestamp(2022, 1, 2) + offset expected = Timestamp(2022, 1, 2 + n) assert result == expected + + [email protected]( + "offset, expected", + [ + ( + DateOffset(minutes=7, nanoseconds=18), + Timestamp("2022-01-01 00:07:00.000000018"), + ), + (DateOffset(nanoseconds=3), Timestamp("2022-01-01 00:00:00.000000003")), + ], +) +def test_dateoffset_add_sub_timestamp_series_with_nano(offset, expected): + # GH 47856 + start_time = Timestamp("2022-01-01") + teststamp = start_time + testseries = Series([start_time]) + testseries = testseries + offset + assert testseries[0] == expected + testseries -= offset + assert testseries[0] == teststamp + testseries = offset + testseries + assert testseries[0] == expected
- [ ] closes #47856 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
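For reference, the behavior the new tests pin down (condensed from the test cases in the diff):

```python
import pandas as pd

offset = pd.DateOffset(minutes=7, nanoseconds=18)
s = pd.Series([pd.Timestamp("2022-01-01")])

result = s + offset
# Before the fix the nanoseconds field was silently dropped when the
# offset was applied to a Series; with the fix:
assert result[0] == pd.Timestamp("2022-01-01 00:07:00.000000018")
```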
https://api.github.com/repos/pandas-dev/pandas/pulls/48008
2022-08-08T17:52:55Z
2022-08-10T00:12:34Z
2022-08-10T00:12:34Z
2022-08-10T00:12:40Z
BUG in read_pickle
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 2928d8c6520b0..373d608876c3e 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -160,11 +160,14 @@ def read_pickle( Notes ----- - read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3. + read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3 + provided the object was serialized with to_pickle. Examples -------- - >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP + >>> original_df = pd.DataFrame( + ... {{"foo": range(5), "bar": range(5, 10)}} + ... ) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 @@ -182,7 +185,7 @@ def read_pickle( 2 2 7 3 3 8 4 4 9 - """ # noqa: E501 + """ excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError) with get_handle( filepath_or_buffer,
- [x] closes #47895 - [x] passes all checks - [ ] Fixed the backwards-compatibility note in `pandas/io/pickle.py`
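The corrected docstring example is runnable as-is outside the doc build (the pickle path is just a placeholder):

```python
import pandas as pd

original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
original_df.to_pickle("./dummy.pkl")          # serialized with to_pickle...
unpickled_df = pd.read_pickle("./dummy.pkl")  # ...so the compatibility guarantee applies
```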
https://api.github.com/repos/pandas-dev/pandas/pulls/48007
2022-08-08T15:54:11Z
2022-08-09T19:10:46Z
2022-08-09T19:10:46Z
2022-08-09T19:11:33Z
DOC: move release note for #47581
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index dce8fb60ecdd6..b4224b5b210e0 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) - Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`) +- Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) - Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) - diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index ce1b4fd32f60e..bd9f349ef8cb4 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -941,7 +941,6 @@ Indexing - Bug in :meth:`Series.loc` raising with boolean indexer containing ``NA`` when :class:`Index` did not match (:issue:`46551`) - Bug in :meth:`Series.__setitem__` where setting :attr:`NA` into a numeric-dtype :class:`Series` would incorrectly upcast to object-dtype rather than treating the value as ``np.nan`` (:issue:`44199`) - Bug in :meth:`DataFrame.loc` when setting values to a column and right hand side is a dictionary (:issue:`47216`) -- Bug in :meth:`DataFrame.loc` when setting a :class:`DataFrame` not aligning index in some cases (:issue:`47578`) - Bug in :meth:`Series.__setitem__` with ``datetime64[ns]`` dtype, an all-``False`` boolean mask, and an incompatible value incorrectly casting to ``object`` instead of retaining ``datetime64[ns]`` dtype (:issue:`45967`) - Bug in :meth:`Index.__getitem__` raising ``ValueError`` when indexer is from boolean dtype with ``NA`` (:issue:`45806`) - Bug in :meth:`Series.__setitem__` losing precision when enlarging :class:`Series` with scalar (:issue:`32346`)
xref https://github.com/pandas-dev/pandas/pull/47581#issuecomment-1179682819
https://api.github.com/repos/pandas-dev/pandas/pulls/48006
2022-08-08T13:13:41Z
2022-08-08T14:38:28Z
2022-08-08T14:38:28Z
2022-08-08T14:38:37Z
DOC: move release note for #47905
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index a49772fb83ca7..dce8fb60ecdd6 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -27,7 +27,6 @@ Bug fixes ~~~~~~~~~ - The :class:`errors.FutureWarning` raised when passing arguments (other than ``filepath_or_buffer``) as positional in :func:`read_csv` is now raised at the correct stacklevel (:issue:`47385`) - Bug in :meth:`DataFrame.to_sql` when ``method`` was a ``callable`` that did not return an ``int`` and would raise a ``TypeError`` (:issue:`46891`) -- Bug in :func:`read_xml` when reading XML files with Chinese character tags and would raise ``XMLSyntaxError`` (:issue:`47902`) - Bug in :meth:`loc.__getitem__` with a list of keys causing an internal inconsistency that could lead to a disconnect between ``frame.at[x, y]`` vs ``frame[y].loc[x]`` (:issue:`22372`) .. --------------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index eef14d9432eeb..ce1b4fd32f60e 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -1007,6 +1007,7 @@ I/O - Bug in :func:`read_sas` with RLE-compressed SAS7BDAT files that contain 0x00 control bytes (:issue:`47099`) - Bug in :func:`read_parquet` with ``use_nullable_dtypes=True`` where ``float64`` dtype was returned instead of nullable ``Float64`` dtype (:issue:`45694`) - Bug in :meth:`DataFrame.to_json` where ``PeriodDtype`` would not make the serialization roundtrip when read back with :meth:`read_json` (:issue:`44720`) +- Bug in :func:`read_xml` when reading XML files with Chinese character tags and would raise ``XMLSyntaxError`` (:issue:`47902`) Period ^^^^^^
xref https://github.com/pandas-dev/pandas/pull/47925#issuecomment-1207902781
https://api.github.com/repos/pandas-dev/pandas/pulls/48005
2022-08-08T09:51:13Z
2022-08-08T13:07:26Z
2022-08-08T13:07:26Z
2022-08-08T13:07:30Z
SAS7BDAT parser: Fix page count
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 7282affe1b5e6..beac654696ed7 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -313,7 +313,7 @@ def _get_properties(self) -> None: const.page_size_offset + align1, const.page_size_length ) self._page_count = self._read_int( - const.page_count_offset + align1, const.page_count_length + const.page_count_offset + align1, self._int_length ) self.sas_release_offset = self._read_and_convert_header_text(
While the page size is a 32-bit field (https://github.com/Roche/pyreadstat/blob/e0627c7cf406e4296a9af16036a563719fcba237/src/sas/readstat_sas.c#L245), the page count is word-sized (https://github.com/Roche/pyreadstat/blob/e0627c7cf406e4296a9af16036a563719fcba237/src/sas/readstat_sas.c#L270-L284). Unfortunately I don't have a test file that I can share. - [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
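A sketch of the distinction being fixed; the function, field names, and offsets below are illustrative stand-ins, not the real SAS7BDAT header layout:

```python
import struct


def read_page_fields(header: bytes, size_off: int, count_off: int, word: int):
    # Page size is always a 32-bit integer...
    page_size = struct.unpack_from("<i", header, size_off)[0]
    # ...while page count is word-sized: 4 bytes in 32-bit files,
    # 8 bytes in 64-bit files (hence reading it with self._int_length).
    fmt = "<q" if word == 8 else "<i"
    page_count = struct.unpack_from(fmt, header, count_off)[0]
    return page_size, page_count
```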
https://api.github.com/repos/pandas-dev/pandas/pulls/48004
2022-08-08T07:38:57Z
2022-10-04T19:15:53Z
null
2022-10-04T19:15:53Z
TYP: tseries/holiday
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dbddba57ef21c..bceca9a026500 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -97,7 +97,7 @@ repos: - id: pyright_reportGeneralTypeIssues # note: assumes python env is setup and activated name: pyright reportGeneralTypeIssues - entry: pyright --skipunannotated -p pyright_reportGeneralTypeIssues.json + entry: pyright -p pyright_reportGeneralTypeIssues.json language: node pass_filenames: false types: [python] diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 30c770f32c2dc..e1eecd9f446d6 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -250,6 +250,9 @@ class DatetimeIndex(DatetimeTimedeltaMixin): <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. """ + # from inherit_names + dayofweek: Literal[0, 1, 2, 3, 4, 5, 6] + _typ = "datetimeindex" _data_cls = DatetimeArray diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 6426dbcd54489..4be42ee6064b0 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -4,6 +4,10 @@ datetime, timedelta, ) +from typing import ( + TYPE_CHECKING, + Sequence, +) import warnings from dateutil.relativedelta import ( @@ -33,6 +37,9 @@ Easter, ) +if TYPE_CHECKING: + from pandas._libs.tslibs import BaseOffset + def next_monday(dt: datetime) -> datetime: """ @@ -152,14 +159,14 @@ class Holiday: def __init__( self, name, - year=None, - month=None, - day=None, - offset=None, + year: int | None = None, + month: int = 1, + day: int = 1, + offset: BaseOffset | Sequence[BaseOffset] | None = None, observance=None, start_date=None, end_date=None, - days_of_week=None, + days_of_week: tuple[int, ...] | None = None, ) -> None: """ Parameters @@ -322,7 +329,7 @@ def _reference_dates(self, start_date, end_date): return dates - def _apply_rule(self, dates): + def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex: """ Apply the given offset/observance to a DatetimeIndex of dates. 
diff --git a/pyright_reportGeneralTypeIssues.json b/pyright_reportGeneralTypeIssues.json index c482aa32600fb..4f3ac706849f1 100644 --- a/pyright_reportGeneralTypeIssues.json +++ b/pyright_reportGeneralTypeIssues.json @@ -15,9 +15,17 @@ "pandas/io/clipboard", "pandas/util/version", # and all files that currently don't pass + "pandas/_config/config.py", + "pandas/_config/localization.py", "pandas/_testing/__init__.py", + "pandas/_testing/_io.py", + "pandas/_testing/asserters.py", + "pandas/_version.py", + "pandas/compat/pickle_compat.py", + "pandas/conftest.py", "pandas/core/algorithms.py", "pandas/core/apply.py", + "pandas/core/array_algos/masked_reductions.py", "pandas/core/array_algos/take.py", "pandas/core/arrays/_mixins.py", "pandas/core/arrays/arrow/array.py", @@ -28,26 +36,36 @@ "pandas/core/arrays/datetimes.py", "pandas/core/arrays/interval.py", "pandas/core/arrays/masked.py", + "pandas/core/arrays/numpy_.py", "pandas/core/arrays/period.py", + "pandas/core/arrays/sparse/accessor.py", "pandas/core/arrays/sparse/array.py", "pandas/core/arrays/sparse/dtype.py", "pandas/core/arrays/string_.py", "pandas/core/arrays/string_arrow.py", "pandas/core/arrays/timedeltas.py", + "pandas/core/base.py", "pandas/core/computation/align.py", + "pandas/core/computation/expr.py", + "pandas/core/computation/ops.py", "pandas/core/construction.py", + "pandas/core/describe.py", + "pandas/core/dtypes/astype.py", "pandas/core/dtypes/cast.py", "pandas/core/dtypes/common.py", "pandas/core/dtypes/concat.py", "pandas/core/dtypes/dtypes.py", "pandas/core/frame.py", "pandas/core/generic.py", + "pandas/core/groupby/categorical.py", "pandas/core/groupby/generic.py", "pandas/core/groupby/groupby.py", "pandas/core/groupby/grouper.py", "pandas/core/groupby/ops.py", "pandas/core/indexers/objects.py", "pandas/core/indexers/utils.py", + "pandas/core/indexes/accessors.py", + "pandas/core/indexes/api.py", "pandas/core/indexes/base.py", "pandas/core/indexes/category.py", "pandas/core/indexes/datetimelike.py", @@ -57,7 +75,10 @@ "pandas/core/indexes/multi.py", "pandas/core/indexes/numeric.py", "pandas/core/indexes/period.py", + "pandas/core/indexes/range.py", "pandas/core/indexing.py", + "pandas/core/interchange/column.py", + "pandas/core/interchange/from_dataframe.py", "pandas/core/internals/api.py", "pandas/core/internals/array_manager.py", "pandas/core/internals/blocks.py", @@ -66,20 +87,30 @@ "pandas/core/internals/managers.py", "pandas/core/missing.py", "pandas/core/nanops.py", + "pandas/core/ops/__init__.py", + "pandas/core/ops/array_ops.py", + "pandas/core/ops/common.py", "pandas/core/resample.py", "pandas/core/reshape/concat.py", + "pandas/core/reshape/melt.py", "pandas/core/reshape/merge.py", "pandas/core/reshape/pivot.py", "pandas/core/reshape/reshape.py", "pandas/core/reshape/tile.py", + "pandas/core/sample.py", "pandas/core/series.py", "pandas/core/sorting.py", "pandas/core/strings/accessor.py", + "pandas/core/strings/object_array.py", "pandas/core/tools/datetimes.py", + "pandas/core/tools/numeric.py", "pandas/core/tools/timedeltas.py", + "pandas/core/tools/times.py", "pandas/core/util/hashing.py", + "pandas/core/window/common.py", "pandas/core/window/ewm.py", "pandas/core/window/rolling.py", + "pandas/io/clipboards.py", "pandas/io/common.py", "pandas/io/excel/_base.py", "pandas/io/excel/_odfreader.py", @@ -87,24 +118,36 @@ "pandas/io/excel/_openpyxl.py", "pandas/io/excel/_pyxlsb.py", "pandas/io/excel/_xlrd.py", + "pandas/io/formats/console.py", "pandas/io/formats/csvs.py", "pandas/io/formats/excel.py", 
"pandas/io/formats/format.py", "pandas/io/formats/info.py", "pandas/io/formats/printing.py", + "pandas/io/formats/string.py", "pandas/io/formats/style.py", "pandas/io/formats/style_render.py", "pandas/io/formats/xml.py", "pandas/io/json/_json.py", + "pandas/io/orc.py", "pandas/io/parquet.py", "pandas/io/parsers/arrow_parser_wrapper.py", "pandas/io/parsers/base_parser.py", "pandas/io/parsers/c_parser_wrapper.py", + "pandas/io/parsers/python_parser.py", "pandas/io/pytables.py", - "pandas/io/sas/sas_xport.py", "pandas/io/sql.py", "pandas/io/stata.py", "pandas/io/xml.py", + "pandas/plotting/_core.py", + "pandas/plotting/_matplotlib/boxplot.py", + "pandas/plotting/_matplotlib/converter.py", + "pandas/plotting/_matplotlib/core.py", + "pandas/plotting/_matplotlib/hist.py", + "pandas/plotting/_matplotlib/misc.py", + "pandas/plotting/_matplotlib/timeseries.py", "pandas/tseries/frequencies.py", + "pandas/util/_decorators.py", + "pandas/util/_test_decorators.py", ], }
null
https://api.github.com/repos/pandas-dev/pandas/pulls/48003
2022-08-07T19:25:53Z
2022-08-09T16:47:07Z
null
2022-09-10T01:38:41Z
TYP: annotate functions that always error with NoReturn
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index d6b63956422c7..5feed98cbc75b 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -15,6 +15,7 @@ Generic, Hashable, Iterator, + NoReturn, Sequence, final, ) @@ -1243,7 +1244,7 @@ def groupings(self) -> list[grouper.Grouping]: ping = grouper.Grouping(lev, lev, in_axis=False, level=None) return [ping] - def _aggregate_series_fast(self, obj: Series, func: Callable) -> np.ndarray: + def _aggregate_series_fast(self, obj: Series, func: Callable) -> NoReturn: # -> np.ndarray[object] raise NotImplementedError( "This should not be reached; use _aggregate_series_pure_python" diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ffb222c61f6ab..14f9b71c5e03c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -12,6 +12,7 @@ Hashable, Iterable, Literal, + NoReturn, Sequence, TypeVar, cast, @@ -3166,7 +3167,7 @@ def __xor__(self, other): return self.symmetric_difference(other) @final - def __nonzero__(self): + def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 90713e846fbd1..043fd07b28025 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -8,7 +8,10 @@ """ from __future__ import annotations -from typing import Any +from typing import ( + Any, + NoReturn, +) from pandas.core.base import PandasObject @@ -93,7 +96,7 @@ def __reduce__(self): def __hash__(self) -> int: # type: ignore[override] return hash(tuple(self)) - def _disabled(self, *args, **kwargs): + def _disabled(self, *args, **kwargs) -> NoReturn: """ This method will not function because object is immutable. """
There are more non-abstract functions that always error; most of them should probably be abstract methods instead, so I left those untouched.
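A minimal sketch of the annotation being applied (modeled on the `FrozenList._disabled` case from the diff; the message text is abbreviated):

```python
from typing import NoReturn


def _disabled(*args, **kwargs) -> NoReturn:
    # NoReturn tells type checkers the function never returns normally,
    # which is more precise than the inferred "-> None" for a raiser.
    raise TypeError("'FrozenList' does not support mutable operations.")
```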
https://api.github.com/repos/pandas-dev/pandas/pulls/48002
2022-08-07T15:52:33Z
2022-08-08T21:17:53Z
2022-08-08T21:17:53Z
2022-09-10T01:38:43Z
BUG Fix: Series.add(DataFrame) fails to align
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bdf811f6a8f6a..af4ab3402e7fd 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -855,6 +855,7 @@ Numeric - Bug in division, ``pow`` and ``mod`` operations on array-likes with ``dtype="boolean"`` not being like their ``np.bool_`` counterparts (:issue:`46063`) - Bug in multiplying a :class:`Series` with ``IntegerDtype`` or ``FloatingDtype`` by an array-like with ``timedelta64[ns]`` dtype incorrectly raising (:issue:`45622`) - Bug in :meth:`mean` where the optional dependency ``bottleneck`` causes precision loss linear in the length of the array. ``bottleneck`` has been disabled for :meth:`mean` improving the loss to log-linear but may result in a performance decrease. (:issue:`42878`) +- Bug in :meth:`flex_method_SERIES` Series add with DataFrame fails to align (:issue:`46179`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index e9fefd9268870..d3399a11f06a6 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -191,9 +191,15 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): result.name = res_name return result else: + from pandas import DataFrame + if fill_value is not None: self = self.fillna(fill_value) + if isinstance(other, DataFrame): + self, other = align_method_FRAME( + other, self, axis, flex=True, level=level + ) return op(self, other) flex_wrapper.__name__ = name diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 25257a2c102fd..979417aaecfa5 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -2063,3 +2063,21 @@ def test_enum_column_equality(): expected = Series([True, True, True], name=Cols.col1) tm.assert_series_equal(result, expected) + + +def test_series_add_dataframe_align(): + # GH 46179 + df = DataFrame( + {2010: [1], 2020: [3]}, + index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]), + ) + s = Series( + [10.0, 20.0, 30.0], + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = s.add(df, axis=0) + expected = df.add(s, axis=0) + + tm.assert_frame_equal(result, expected)
- [ ] closes #46179 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
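The new test checks alignment in both directions; condensed:

```python
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame(
    {2010: [1], 2020: [3]},
    index=pd.MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]),
)
s = pd.Series(
    [10.0, 20.0, 30.0],
    index=pd.MultiIndex.from_product(
        [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"]
    ),
)
# With the fix, Series.add(DataFrame) aligns like DataFrame.add(Series):
tm.assert_frame_equal(s.add(df, axis=0), df.add(s, axis=0))
```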
https://api.github.com/repos/pandas-dev/pandas/pulls/47999
2022-08-07T12:28:30Z
2022-08-08T21:20:43Z
null
2022-08-09T05:01:16Z
WARN,TST check stacklevel for all warnings
diff --git a/doc/source/development/contributing_codebase.rst b/doc/source/development/contributing_codebase.rst index c74c44fb1d5f0..bc85a54e61f22 100644 --- a/doc/source/development/contributing_codebase.rst +++ b/doc/source/development/contributing_codebase.rst @@ -122,6 +122,7 @@ Otherwise, you need to do it manually: .. code-block:: python import warnings + from pandas.util._exceptions import find_stack_level def old_func(): @@ -130,7 +131,11 @@ Otherwise, you need to do it manually: .. deprecated:: 1.1.0 Use new_func instead. """ - warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) + warnings.warn( + 'Use new_func instead.', + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) new_func() diff --git a/pandas/_config/config.py b/pandas/_config/config.py index d5e77d824340d..fc35b95bba7dd 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -54,6 +54,7 @@ ContextDecorator, contextmanager, ) +import inspect import re from typing import ( Any, @@ -70,6 +71,7 @@ F, T, ) +from pandas.util._exceptions import find_stack_level class DeprecatedOption(NamedTuple): @@ -657,7 +659,11 @@ def _warn_if_deprecated(key: str) -> bool: d = _get_deprecated_option(key) if d: if d.msg: - warnings.warn(d.msg, FutureWarning) + warnings.warn( + d.msg, + FutureWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) else: msg = f"'{key}' is deprecated" if d.removal_ver: @@ -667,7 +673,9 @@ def _warn_if_deprecated(key: str) -> bool: else: msg += ", please refrain from using it." - warnings.warn(msg, FutureWarning) + warnings.warn( + msg, FutureWarning, stacklevel=find_stack_level(inspect.currentframe()) + ) return True return False diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 7cacc8cc639f7..2982110ea35cc 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -230,7 +230,7 @@ def _warning_interval(inclusive: str | None = None, closed: None | lib.NoDefault warnings.warn( "Argument `closed` is deprecated in favor of `inclusive`.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(inspect.currentframe()), ) if closed is None: inclusive = "right" diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index c90c9003c8d60..8e4b23f32f48c 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -1,6 +1,7 @@ from collections import abc from decimal import Decimal from enum import Enum +import inspect from typing import Literal import warnings @@ -30,6 +31,8 @@ from cython cimport ( floating, ) +from pandas.util._exceptions import find_stack_level + import_datetime() import numpy as np @@ -352,6 +355,7 @@ def fast_unique_multiple(list arrays, sort: bool = True): "The values in the array are unorderable. 
" "Pass `sort=False` to suppress this warning.", RuntimeWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) pass diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index b07fa143c98b6..e8b7160af9b2c 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -8,10 +8,13 @@ from csv import ( QUOTE_NONNUMERIC, ) from errno import ENOENT +import inspect import sys import time import warnings +from pandas.util._exceptions import find_stack_level + cimport cython from cpython.bytes cimport ( PyBytes_AsString, @@ -958,7 +961,7 @@ cdef class TextReader: "Defining usecols with out of bounds indices is deprecated " "and will raise a ParserError in a future version.", FutureWarning, - stacklevel=6, + stacklevel=find_stack_level(inspect.currentframe()), ) results = {} @@ -1009,7 +1012,7 @@ cdef class TextReader: warnings.warn((f"Both a converter and dtype were specified " f"for column {name} - only the converter will " f"be used."), ParserWarning, - stacklevel=5) + stacklevel=find_stack_level(inspect.currentframe())) results[i] = _apply_converter(conv, self.parser, i, start, end) continue diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 55057ff628619..598e6b552e49b 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,3 +1,4 @@ +import inspect import warnings cimport cython @@ -9,6 +10,8 @@ from cpython.datetime cimport ( tzinfo, ) +from pandas.util._exceptions import find_stack_level + # import datetime C API import_datetime() @@ -845,7 +848,7 @@ cdef inline bint _parse_today_now(str val, int64_t* iresult, bint utc): "deprecated. In a future version, this will match Timestamp('now') " "and Timestamp.now()", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return True diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 0dfb859a3444f..b25095ead790b 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -1,9 +1,13 @@ +import inspect + cimport cython import warnings import numpy as np +from pandas.util._exceptions import find_stack_level + cimport numpy as cnp from cpython.object cimport PyObject from numpy cimport ( @@ -287,7 +291,7 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit, "Conversion of non-round float with unit={unit} is ambiguous " "and will raise in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) ts = cast_from_unit(ts, unit) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index b3dd5b7907cad..909541d24121e 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -1,5 +1,8 @@ +import inspect import warnings +from pandas.util._exceptions import find_stack_level + from cpython.datetime cimport ( PyDate_Check, PyDateTime_Check, @@ -135,7 +138,7 @@ cdef class _NaT(datetime): "order to match the standard library behavior. 
" "In a future version these will be considered non-comparable.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return False @@ -379,7 +382,7 @@ class NaTType(_NaT): warnings.warn( "NaT.freq is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return None diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index d553c0d77bc56..d799770a57be2 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1,8 +1,11 @@ +import inspect import operator import re import time import warnings +from pandas.util._exceptions import find_stack_level + cimport cython from cpython.datetime cimport ( PyDate_Check, @@ -499,7 +502,7 @@ cdef class BaseOffset: "DateOffset.__call__ is deprecated and will be removed in a future " "version. Use `offset + other` instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self._apply(other) @@ -509,7 +512,7 @@ cdef class BaseOffset: f"{type(self).__name__}.apply is deprecated and will be removed " "in a future version. Use `offset + other` instead", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(inspect.currentframe()), ) return self._apply(other) @@ -820,7 +823,7 @@ cdef class BaseOffset: warnings.warn( "onOffset is a deprecated, use is_on_offset instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self.is_on_offset(dt) @@ -828,7 +831,7 @@ cdef class BaseOffset: warnings.warn( "isAnchored is a deprecated, use is_anchored instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self.is_anchored() diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 8c223020c4012..b442e32071011 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -1,10 +1,13 @@ """ Parsing functions for datetime and datetime-like strings. """ +import inspect import re import time import warnings +from pandas.util._exceptions import find_stack_level + cimport cython from cpython.datetime cimport ( datetime, @@ -214,7 +217,7 @@ cdef inline object _parse_delimited_date(str date_string, bint dayfirst): format='MM/DD/YYYY', dayfirst='True', ), - stacklevel=4, + stacklevel=find_stack_level(inspect.currentframe()), ) elif not dayfirst and swapped_day_and_month: warnings.warn( @@ -222,7 +225,7 @@ cdef inline object _parse_delimited_date(str date_string, bint dayfirst): format='DD/MM/YYYY', dayfirst='False (the default)', ), - stacklevel=4, + stacklevel=find_stack_level(inspect.currentframe()), ) # In Python <= 3.6.0 there is no range checking for invalid dates # in C api, thus we call faster C version for 3.6.1 or newer diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index fa264f29aa8a8..d2d4838bfafc0 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1,5 +1,8 @@ +import inspect import warnings +from pandas.util._exceptions import find_stack_level + cimport numpy as cnp from cpython.object cimport ( Py_EQ, @@ -1827,7 +1830,7 @@ cdef class _Period(PeriodMixin): "be removed in a future version. 
Use " "`per.to_timestamp(...).tz_localize(tz)` instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) how = validate_end_alias(how) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 215d1c9d6c722..f53d4ccf2d555 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,6 +1,9 @@ import collections +import inspect import warnings +from pandas.util._exceptions import find_stack_level + cimport cython from cpython.object cimport ( Py_EQ, @@ -683,7 +686,7 @@ cdef inline timedelta_from_spec(object number, object frac, object unit): "Units 'M', 'Y' and 'y' do not represent unambiguous " "timedelta values and will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(inspect.currentframe()), ) if unit == 'M': @@ -1055,7 +1058,7 @@ cdef class _Timedelta(timedelta): warnings.warn( "Timedelta.freq is deprecated and will be removed in a future version", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return None @@ -1065,7 +1068,7 @@ cdef class _Timedelta(timedelta): warnings.warn( "Timedelta.is_populated is deprecated and will be removed in a future version", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self._is_populated @@ -1269,7 +1272,7 @@ cdef class _Timedelta(timedelta): warnings.warn( "Timedelta.delta is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self.value diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 66d848ba43da9..2655c25ed0893 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -6,6 +6,7 @@ construction requirements, we need to do object instantiation in python (see Timestamp class below). This will serve as a C extension type that shadows the python class, where we do any heavy lifting. """ +import inspect import warnings cimport cython @@ -47,6 +48,9 @@ import_datetime() from pandas._libs.tslibs cimport ccalendar from pandas._libs.tslibs.base cimport ABCTimestamp + +from pandas.util._exceptions import find_stack_level + from pandas._libs.tslibs.conversion cimport ( _TSObject, convert_datetime_to_tsobject, @@ -253,7 +257,7 @@ cdef class _Timestamp(ABCTimestamp): warnings.warn( "Timestamp.freq is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self._freq @@ -365,7 +369,7 @@ cdef class _Timestamp(ABCTimestamp): "In a future version these will be considered non-comparable. " "Use 'ts == pd.Timestamp(date)' or 'ts.date() == date' instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return NotImplemented else: @@ -666,7 +670,7 @@ cdef class _Timestamp(ABCTimestamp): "version. 
When you have a freq, use " f"freq.{field}(timestamp) instead.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) @property @@ -1172,7 +1176,7 @@ cdef class _Timestamp(ABCTimestamp): """ if self.nanosecond != 0 and warn: warnings.warn("Discarding nonzero nanoseconds in conversion.", - UserWarning, stacklevel=2) + UserWarning, stacklevel=find_stack_level(inspect.currentframe())) return datetime(self.year, self.month, self.day, self.hour, self.minute, self.second, @@ -1251,6 +1255,7 @@ cdef class _Timestamp(ABCTimestamp): warnings.warn( "Converting to Period representation will drop timezone information.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) if freq is None: @@ -1259,7 +1264,7 @@ cdef class _Timestamp(ABCTimestamp): "In a future version, calling 'Timestamp.to_period()' without " "passing a 'freq' will raise an exception.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(inspect.currentframe()), ) return Period(self, freq=freq) @@ -1451,7 +1456,7 @@ class Timestamp(_Timestamp): "Timestamp.utcfromtimestamp(ts).tz_localize(None). " "To get the future behavior, use Timestamp.fromtimestamp(ts, 'UTC')", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return cls(datetime.utcfromtimestamp(ts)) @@ -1687,7 +1692,7 @@ class Timestamp(_Timestamp): "as a wall time, not a UTC time. To interpret as a UTC time, " "use `Timestamp(dt64).tz_localize('UTC').tz_convert(tz)`", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) # Once this deprecation is enforced, we can do # return Timestamp(ts_input).tz_localize(tzobj) @@ -1704,7 +1709,7 @@ class Timestamp(_Timestamp): "The 'freq' argument in Timestamp is deprecated and will be " "removed in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) if not is_offset_object(freq): freq = to_offset(freq) @@ -2040,7 +2045,7 @@ timedelta}, default 'raise' warnings.warn( "Timestamp.freqstr is deprecated and will be removed in a future version.", FutureWarning, - stacklevel=1, + stacklevel=find_stack_level(inspect.currentframe()), ) return self._freqstr diff --git a/pandas/_testing/_warnings.py b/pandas/_testing/_warnings.py index e9df85eae550a..a5b0d1e199863 100644 --- a/pandas/_testing/_warnings.py +++ b/pandas/_testing/_warnings.py @@ -130,9 +130,7 @@ def _assert_caught_expected_warning( if issubclass(actual_warning.category, expected_warning): saw_warning = True - if check_stacklevel and issubclass( - actual_warning.category, (FutureWarning, DeprecationWarning) - ): + if check_stacklevel: _assert_raised_with_correct_stacklevel(actual_warning) if match is not None: diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 3801a1648f1e7..c2d1927bccfff 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -1,10 +1,13 @@ from __future__ import annotations import importlib +import inspect import sys import types import warnings +from pandas.util._exceptions import find_stack_level + from pandas.util.version import Version # Update install.rst when updating versions! @@ -159,7 +162,11 @@ def import_optional_dependency( f"(version '{version}' currently installed)." 
) if errors == "warn": - warnings.warn(msg, UserWarning) + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return None elif errors == "raise": raise ImportError(msg) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index a4736c2a141a5..7222e465cb710 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1036,7 +1036,10 @@ def mode( try: npresult = np.sort(npresult) except TypeError as err: - warnings.warn(f"Unable to sort modes: {err}") + warnings.warn( + f"Unable to sort modes: {err}", + stacklevel=find_stack_level(inspect.currentframe()), + ) result = _reconstruct_data(npresult, original.dtype, original) return result diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 0d228582992d2..fff7772c2cf7d 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1011,7 +1011,7 @@ def set_categories( "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, - stacklevel=2, + stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2c070499308a7..253b582eddf2c 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1373,6 +1373,7 @@ def _addsub_object_array(self, other: np.ndarray, op): "Adding/subtracting object-dtype array to " f"{type(self).__name__} not vectorized.", PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) # Caller is responsible for broadcasting if necessary diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 58dee30288be9..64c15df64de3b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -702,6 +702,7 @@ def _add_offset(self, offset) -> DatetimeArray: warnings.warn( "Non-vectorized DateOffset being applied to Series or DatetimeIndex.", PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) result = self.astype("O") + offset result = type(self)._from_sequence(result) @@ -1099,6 +1100,7 @@ def to_period(self, freq=None) -> PeriodArray: "Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) if freq is None: diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index e9302efdce2e7..13ebd2b25e949 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -778,7 +778,11 @@ def fillna( elif method is not None: msg = "fillna with 'method' requires high memory usage." 
- warnings.warn(msg, PerformanceWarning) + warnings.warn( + msg, + PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) new_values = np.asarray(self) # interpolate_2d modifies new_values inplace interpolate_2d(new_values, method=method, limit=limit) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index e82bec47c6ac5..a4b83fc557413 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -7,6 +7,7 @@ """ from __future__ import annotations +import inspect import operator import warnings @@ -15,6 +16,7 @@ from pandas._config import get_option from pandas._typing import FuncType +from pandas.util._exceptions import find_stack_level from pandas.core.computation.check import NUMEXPR_INSTALLED from pandas.core.ops import roperator @@ -214,7 +216,8 @@ def _bool_arith_fallback(op_str, a, b): warnings.warn( f"evaluating in Python space because the {repr(op_str)} " "operator is not supported by numexpr for the bool dtype, " - f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead." + f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.", + stacklevel=find_stack_level(inspect.currentframe()), ) return True return False diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 45b839f175a88..7ed6e0d84445c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2079,7 +2079,7 @@ def __array_wrap__( "The __array_wrap__ method of DataFrame and Series will be removed in " "a future version", DeprecationWarning, - stacklevel=2, + stacklevel=find_stack_level(inspect.currentframe()), ) res = lib.item_from_zerodim(result) if is_scalar(res): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1383f850ab043..816260c8a6d2d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -7437,7 +7437,7 @@ def _maybe_cast_data_without_dtype( "In a future version, the Index constructor will not infer numeric " "dtypes when passed object-dtype sequences (matching Series behavior)", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(inspect.currentframe()), ) result = ensure_wrapped_if_datetimelike(result) return result diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 50aaac211c7a5..ccaf37bd98b85 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1293,6 +1293,7 @@ def _maybe_coerce_merge_keys(self) -> None: "columns where the float values " "are not equal to their int representation.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) continue @@ -1305,6 +1306,7 @@ def _maybe_coerce_merge_keys(self) -> None: "columns where the float values " "are not equal to their int representation.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) continue diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 52b059f6b92af..0270a5dd75952 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import itertools from typing import ( TYPE_CHECKING, @@ -13,6 +14,7 @@ from pandas._typing import npt from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( @@ -130,6 +132,7 @@ def __init__(self, index: MultiIndex, level=-1, constructor=None) -> None: f"The following 
operation may generate {num_cells} cells " f"in the resulting pandas object.", PerformanceWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) self._make_selectors() diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index 27fb06dfb6023..03599497f8d03 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -40,8 +40,11 @@ Pyperclip into running them with whatever permissions the Python process has. """ +import inspect + __version__ = "1.7.0" + import contextlib import ctypes from ctypes import ( @@ -62,6 +65,7 @@ PyperclipException, PyperclipWindowsException, ) +from pandas.util._exceptions import find_stack_level # `import PyQt4` sys.exit()s if DISPLAY is not in the environment. # Thus, we need to detect the presence of $DISPLAY manually @@ -270,10 +274,14 @@ def copy_dev_clipboard(text): if text == "": warnings.warn( "Pyperclip cannot copy a blank string to the clipboard on Cygwin. " - "This is effectively a no-op." + "This is effectively a no-op.", + stacklevel=find_stack_level(inspect.currentframe()), ) if "\r" in text: - warnings.warn("Pyperclip cannot handle \\r characters on Cygwin.") + warnings.warn( + "Pyperclip cannot handle \\r characters on Cygwin.", + stacklevel=find_stack_level(inspect.currentframe()), + ) with open("/dev/clipboard", "wt") as fd: fd.write(text) @@ -517,7 +525,8 @@ def determine_clipboard(): if os.path.exists("/dev/clipboard"): warnings.warn( "Pyperclip's support for Cygwin is not perfect, " - "see https://github.com/asweigart/pyperclip/issues/55" + "see https://github.com/asweigart/pyperclip/issues/55", + stacklevel=find_stack_level(inspect.currentframe()), ) return init_dev_clipboard_clipboard() diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 0968f1facf128..7cf01affd5a19 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,9 +1,12 @@ """ io on the clipboard """ from __future__ import annotations +import inspect from io import StringIO import warnings +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.generic import ABCDataFrame from pandas import ( @@ -79,7 +82,8 @@ def read_clipboard(sep: str = r"\s+", **kwargs): # pragma: no cover kwargs["engine"] = "python" elif len(sep) > 1 and kwargs.get("engine") == "c": warnings.warn( - "read_clipboard with regex separator does not work properly with c engine." + "read_clipboard with regex separator does not work properly with c engine.", + stacklevel=find_stack_level(inspect.currentframe()), ) return read_csv(StringIO(text), sep=sep, **kwargs) @@ -135,10 +139,14 @@ def to_clipboard( return except TypeError: warnings.warn( - "to_clipboard in excel mode requires a single character separator." 
+ "to_clipboard in excel mode requires a single character separator.", + stacklevel=find_stack_level(inspect.currentframe()), ) elif sep is not None: - warnings.warn("to_clipboard with excel=False ignores the sep argument.") + warnings.warn( + "to_clipboard with excel=False ignores the sep argument.", + stacklevel=find_stack_level(inspect.currentframe()), + ) if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation diff --git a/pandas/io/common.py b/pandas/io/common.py index 7add6ec10222c..2fae19df13f8b 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -339,6 +339,7 @@ def _get_filepath_or_buffer( warnings.warn( f"{compression} will not write the byte order mark for {encoding}", UnicodeWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) # Use binary mode when converting path-like objects to file-like objects (fsspec) diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index 778df087d28d8..e86a1b0bcd635 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -3,6 +3,7 @@ """ from __future__ import annotations +import inspect import re from typing import ( Callable, @@ -13,6 +14,7 @@ import warnings from pandas.errors import CSSWarning +from pandas.util._exceptions import find_stack_level def _side_expander(prop_fmt: str) -> Callable: @@ -46,7 +48,11 @@ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]: try: mapping = self.SIDE_SHORTHANDS[len(tokens)] except KeyError: - warnings.warn(f'Could not expand "{prop}: {value}"', CSSWarning) + warnings.warn( + f'Could not expand "{prop}: {value}"', + CSSWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return for key, idx in zip(self.SIDES, mapping): yield prop_fmt.format(key), tokens[idx] @@ -88,7 +94,9 @@ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]: tokens = value.split() if len(tokens) == 0 or len(tokens) > 3: warnings.warn( - f'Too many tokens provided to "{prop}" (expected 1-3)', CSSWarning + f'Too many tokens provided to "{prop}" (expected 1-3)', + CSSWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) # TODO: Can we use current color as initial value to comply with CSS standards? 
@@ -324,7 +332,11 @@ def _update_other_units(self, props: dict[str, str]) -> dict[str, str]: def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS): def _error(): - warnings.warn(f"Unhandled size: {repr(in_val)}", CSSWarning) + warnings.warn( + f"Unhandled size: {repr(in_val)}", + CSSWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return self.size_to_pt("1!!default", conversions=conversions) match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val) @@ -396,4 +408,5 @@ def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]: warnings.warn( f"Ill-formatted attribute: expected a colon in {repr(decl)}", CSSWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 5a7d0a4690bbd..c4ddac088d901 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -7,6 +7,7 @@ lru_cache, reduce, ) +import inspect import itertools import re from typing import ( @@ -28,6 +29,7 @@ StorageOptions, ) from pandas.util._decorators import doc +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes import missing from pandas.core.dtypes.common import ( @@ -427,7 +429,11 @@ def color_to_excel(self, val: str | None) -> str | None: try: return self.NAMED_COLORS[val] except KeyError: - warnings.warn(f"Unhandled color format: {repr(val)}", CSSWarning) + warnings.warn( + f"Unhandled color format: {repr(val)}", + CSSWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return None def _is_hex_color(self, color_string: str) -> bool: diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index b7a8b5cc82f7a..7c323992d11a0 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -5,6 +5,7 @@ """ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, Any, @@ -17,6 +18,7 @@ DtypeObj, JSONSerializable, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.common import ( @@ -100,10 +102,14 @@ def set_default_names(data): if com.all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == "index": - warnings.warn("Index name of 'index' is not round-trippable.") + warnings.warn( + "Index name of 'index' is not round-trippable.", + stacklevel=find_stack_level(inspect.currentframe()), + ) elif len(nms) > 1 and any(x.startswith("level_") for x in nms): warnings.warn( - "Index names beginning with 'level_' are not round-trippable." 
+ "Index names beginning with 'level_' are not round-trippable.", + stacklevel=find_stack_level(inspect.currentframe()), ) return data diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f7d5fb9270247..96ba6b2e84cf3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3545,7 +3545,11 @@ def validate_version(self, where=None) -> None: if where is not None: if self.is_old_version: ws = incompatibility_doc % ".".join([str(x) for x in self.version]) - warnings.warn(ws, IncompatibilityWarning) + warnings.warn( + ws, + IncompatibilityWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) def validate_min_itemsize(self, min_itemsize) -> None: """ diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index a2e217767d1d4..648c58dee6600 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -11,6 +11,7 @@ from collections import abc from datetime import datetime +import inspect import struct import warnings @@ -23,6 +24,7 @@ ReadBuffer, ) from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level import pandas as pd @@ -412,7 +414,10 @@ def _record_count(self) -> int: total_records_length = self.filepath_or_buffer.tell() - self.record_start if total_records_length % 80 != 0: - warnings.warn("xport file may be corrupted.") + warnings.warn( + "xport file may be corrupted.", + stacklevel=find_stack_level(inspect.currentframe()), + ) if self.record_length > 80: self.filepath_or_buffer.seek(self.record_start) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 2b835a1e7ebed..ee6564d103147 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -762,6 +762,7 @@ def pandasSQL_builder(con, schema: str | None = None) -> SQLDatabase | SQLiteDat "database string URI or sqlite3 DBAPI2 connection. " "Other DBAPI2 objects are not tested. Please consider using SQLAlchemy.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) return SQLiteDatabase(con) @@ -1655,7 +1656,11 @@ def check_case_sensitive( "due to case sensitivity issues. Consider using lower " "case table names." ) - warnings.warn(msg, UserWarning) + warnings.warn( + msg, + UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) def to_sql( self, diff --git a/pandas/io/stata.py b/pandas/io/stata.py index e59b6c8770389..80e7f54d828b5 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -13,6 +13,7 @@ from collections import abc import datetime +import inspect from io import BytesIO import os import struct @@ -51,6 +52,7 @@ Appender, doc, ) +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, @@ -348,7 +350,10 @@ def convert_delta_safe(base, deltas, unit) -> Series: conv_dates = convert_delta_safe(base, ms, "ms") elif fmt.startswith(("%tC", "tC")): - warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.") + warnings.warn( + "Encountered %tC format. 
Leaving in Stata Internal Format.", + stacklevel=find_stack_level(inspect.currentframe()), + ) conv_dates = Series(dates, dtype=object) if has_bad_values: conv_dates[bad_locs] = NaT @@ -462,7 +467,10 @@ def g(x: datetime.datetime) -> int: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: - warnings.warn("Stata Internal Format tC not supported.") + warnings.warn( + "Stata Internal Format tC not supported.", + stacklevel=find_stack_level(inspect.currentframe()), + ) conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) @@ -642,7 +650,11 @@ def _cast_to_stata_types(data: DataFrame) -> DataFrame: sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name] data.loc[orig_missing, col] = sentinel if ws: - warnings.warn(ws, PossiblePrecisionLoss) + warnings.warn( + ws, + PossiblePrecisionLoss, + stacklevel=find_stack_level(inspect.currentframe()), + ) return data @@ -697,6 +709,7 @@ def _prepare_value_labels(self): warnings.warn( value_label_mismatch_doc.format(self.labname), ValueLabelTypeMismatch, + stacklevel=find_stack_level(inspect.currentframe()), ) category = category.encode(self._encoding) offsets.append(self.text_len) @@ -1506,7 +1519,11 @@ def _decode(self, s: bytes) -> str: so the fallback encoding of latin-1 is being used. This can happen when a file has been incorrectly encoded by Stata or some other software. You should verify the string values returned are correct.""" - warnings.warn(msg, UnicodeWarning) + warnings.warn( + msg, + UnicodeWarning, + stacklevel=find_stack_level(inspect.currentframe()), + ) return s.decode("latin-1") def _read_value_labels(self) -> None: @@ -1902,7 +1919,9 @@ def _do_convert_categoricals( if self._using_iterator: # warn is using an iterator warnings.warn( - categorical_conversion_warning, CategoricalConversionWarning + categorical_conversion_warning, + CategoricalConversionWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) initial_categories = None cat_data = Categorical( @@ -2482,7 +2501,11 @@ def _check_column_names(self, data: DataFrame) -> DataFrame: conversion_warning.append(msg) ws = invalid_name_doc.format("\n ".join(conversion_warning)) - warnings.warn(ws, InvalidColumnName) + warnings.warn( + ws, + InvalidColumnName, + stacklevel=find_stack_level(inspect.currentframe()), + ) self._converted_names = converted_names self._update_strl_names() @@ -2649,6 +2672,7 @@ def write_file(self) -> None: f"This save was not successful but {self._fname} could not " "be deleted. This file is not valid.", ResourceWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) raise exc diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 045c27bb8fe56..d85495b70e6c3 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect from typing import ( TYPE_CHECKING, Literal, @@ -10,6 +11,8 @@ from matplotlib.artist import setp import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import is_dict_like from pandas.core.dtypes.missing import remove_na_arraylike @@ -89,7 +92,8 @@ def _validate_color_args(self): if self.colormap is not None: warnings.warn( "'color' and 'colormap' cannot be used " - "simultaneously. Using 'color'" + "simultaneously. 
Using 'color'", + stacklevel=find_stack_level(inspect.currentframe()), ) self.color = self.kwds.pop("color") diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index ee7493813f13a..7d8c7da6dd9aa 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -4,6 +4,7 @@ ABC, abstractmethod, ) +import inspect from typing import ( TYPE_CHECKING, Hashable, @@ -22,6 +23,7 @@ ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -394,7 +396,8 @@ def _validate_color_args(self): "color" in self.kwds or "colors" in self.kwds ) and self.colormap is not None: warnings.warn( - "'color' and 'colormap' cannot be used simultaneously. Using 'color'" + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(inspect.currentframe()), ) if "color" in self.kwds and self.style is not None: diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 9e459b82fec97..2f29aafbdf5cf 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -1,5 +1,6 @@ from __future__ import annotations +import inspect import itertools from typing import ( TYPE_CHECKING, @@ -15,6 +16,8 @@ import matplotlib.colors import numpy as np +from pandas.util._exceptions import find_stack_level + from pandas.core.dtypes.common import is_list_like import pandas.core.common as com @@ -121,7 +124,8 @@ def _derive_colors( elif color is not None: if colormap is not None: warnings.warn( - "'color' and 'colormap' cannot be used simultaneously. Using 'color'" + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(inspect.currentframe()), ) return _get_colors_from_color(color) else: diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index c5119205d1861..8f0ea70ab4124 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -234,6 +234,7 @@ def create_subplots( warnings.warn( "When passing multiple axes, layout keyword is ignored.", UserWarning, + stacklevel=find_stack_level(inspect.currentframe()), ) if sharex or sharey: warnings.warn( diff --git a/pandas/tests/apply/test_str.py b/pandas/tests/apply/test_str.py index 2af063dee8b55..38b2a5459eb1f 100644 --- a/pandas/tests/apply/test_str.py +++ b/pandas/tests/apply/test_str.py @@ -80,7 +80,9 @@ def test_apply_np_transformer(float_frame, op, how): if op in ["log", "sqrt"]: warn = RuntimeWarning - with tm.assert_produces_warning(warn): + with tm.assert_produces_warning(warn, check_stacklevel=False): + # float_frame fixture is defined in conftest.py, so we don't check the + # stacklevel as otherwise the test would fail. 
result = getattr(float_frame, how)(op) expected = getattr(np, op)(float_frame) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 60eef0d8097e4..0a2686a24c732 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -246,11 +246,11 @@ def test_isna(self, data_missing): self.assert_equal(sarr.isna(), expected) def test_fillna_limit_pad(self, data_missing): - with tm.assert_produces_warning(PerformanceWarning): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): super().test_fillna_limit_pad(data_missing) def test_fillna_limit_backfill(self, data_missing): - with tm.assert_produces_warning(PerformanceWarning): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): super().test_fillna_limit_backfill(data_missing) def test_fillna_no_op_returns_copy(self, data, request): @@ -258,11 +258,11 @@ def test_fillna_no_op_returns_copy(self, data, request): request.node.add_marker( pytest.mark.xfail(reason="returns array with different fill value") ) - with tm.assert_produces_warning(PerformanceWarning): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): super().test_fillna_no_op_returns_copy(data) def test_fillna_series_method(self, data_missing): - with tm.assert_produces_warning(PerformanceWarning): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): super().test_fillna_limit_backfill(data_missing) @pytest.mark.xfail(reason="Unsupported") @@ -373,7 +373,7 @@ def test_combine_first(self, data, request): super().test_combine_first(data) def test_searchsorted(self, data_for_sorting, as_series): - with tm.assert_produces_warning(PerformanceWarning): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): super().test_searchsorted(data_for_sorting, as_series) def test_shift_0_periods(self, data): diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index e7e971f957e48..40a107658231d 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -406,6 +406,7 @@ def test_astype_preserves_name(self, index, dtype): with tm.assert_produces_warning( warn, raise_on_extra_warnings=is_pyarrow_str, + check_stacklevel=False, ): result = index.astype(dtype) except (ValueError, TypeError, NotImplementedError, SystemError): diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py index 8a3f8788a45aa..c8cef56c73902 100644 --- a/pandas/tests/io/parser/common/test_chunksize.py +++ b/pandas/tests/io/parser/common/test_chunksize.py @@ -192,8 +192,12 @@ def test_warn_if_chunks_have_mismatched_type(all_parsers): buf = StringIO(data) - with tm.assert_produces_warning(warning_type): - df = parser.read_csv(buf) + df = parser.read_csv_check_warnings( + warning_type, + r"Columns \(0\) have mixed types. 
" + "Specify dtype option on import or set low_memory=False.", + buf, + ) assert df.a.dtype == object diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index a0da3a7eaadce..a7cdc3c1a84d2 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -732,8 +732,15 @@ def test_no_header_two_extra_columns(all_parsers): ref = DataFrame([["foo", "bar", "baz"]], columns=column_names) stream = StringIO("foo,bar,baz,bam,blah") parser = all_parsers - with tm.assert_produces_warning(ParserWarning): - df = parser.read_csv(stream, header=None, names=column_names, index_col=False) + df = parser.read_csv_check_warnings( + ParserWarning, + "Length of header or names does not match length of data. " + "This leads to a loss of data with index_col=False.", + stream, + header=None, + names=column_names, + index_col=False, + ) tm.assert_frame_equal(df, ref) diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py index da53664a0278d..2c18d461cddf8 100644 --- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py +++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py @@ -103,10 +103,14 @@ def test_dtype_with_converters(all_parsers): 1.2,2.3""" # Dtype spec ignored if converted specified. - with tm.assert_produces_warning(ParserWarning): - result = parser.read_csv( - StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)} - ) + result = parser.read_csv_check_warnings( + ParserWarning, + "Both a converter and dtype were specified for column a " + "- only the converter will be used.", + StringIO(data), + dtype={"a": "i8"}, + converters={"a": lambda x: str(x)}, + ) expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 9a81790ca3bb0..ecc49ea8adb9f 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -59,15 +59,17 @@ def test_buffer_rd_bytes(c_parser_only): ) parser = c_parser_only - with tm.assert_produces_warning(RuntimeWarning): - # compression has no effect when passing a non-binary object as input - for _ in range(100): - try: - parser.read_csv( - StringIO(data), compression="gzip", delim_whitespace=True - ) - except Exception: - pass + for _ in range(100): + try: + parser.read_csv_check_warnings( + RuntimeWarning, + "compression has no effect when passing a non-binary object as input", + StringIO(data), + compression="gzip", + delim_whitespace=True, + ) + except Exception: + pass def test_delim_whitespace_custom_terminator(c_parser_only): diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py index 55b193903bce0..458d4116558e4 100644 --- a/pandas/tests/io/parser/test_dialect.py +++ b/pandas/tests/io/parser/test_dialect.py @@ -108,9 +108,14 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val kwds[arg] = "blah" with tm.with_csv_dialect(dialect_name, **dialect_kwargs): - with tm.assert_produces_warning(warning_klass): - result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwds) - tm.assert_frame_equal(result, expected) + result = parser.read_csv_check_warnings( + warning_klass, + "Conflicting values for", + StringIO(data), + dialect=dialect_name, + **kwds, + ) + tm.assert_frame_equal(result, expected) 
@pytest.mark.parametrize( @@ -141,6 +146,11 @@ def test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning data = "a:b\n1:2" with tm.with_csv_dialect(dialect_name, **dialect_kwargs): - with tm.assert_produces_warning(warning_klass): - result = parser.read_csv(StringIO(data), dialect=dialect_name, **kwargs) - tm.assert_frame_equal(result, expected) + result = parser.read_csv_check_warnings( + warning_klass, + "Conflicting values for 'delimiter'", + StringIO(data), + dialect=dialect_name, + **kwargs, + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 5d2e5bccd9762..9c8809b6099f9 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1678,10 +1678,14 @@ def test_parse_delimited_date_swap_with_warning( parser = all_parsers expected = DataFrame({0: [expected]}, dtype="datetime64[ns]") warning_msg = "Specify a format to ensure consistent parsing" - with tm.assert_produces_warning(UserWarning, match=warning_msg): - result = parser.read_csv( - StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0] - ) + result = parser.read_csv_check_warnings( + UserWarning, + warning_msg, + StringIO(date_string), + header=None, + dayfirst=dayfirst, + parse_dates=[0], + ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_python_parser_only.py b/pandas/tests/io/parser/test_python_parser_only.py index 0717078a83a46..1d3392b124895 100644 --- a/pandas/tests/io/parser/test_python_parser_only.py +++ b/pandas/tests/io/parser/test_python_parser_only.py @@ -424,8 +424,9 @@ def test_on_bad_lines_callable_not_expected_length(python_parser_only): """ bad_sio = StringIO(data) - with tm.assert_produces_warning(ParserWarning, match="Length of header or names"): - result = parser.read_csv(bad_sio, on_bad_lines=lambda x: x) + result = parser.read_csv_check_warnings( + ParserWarning, "Length of header or names", bad_sio, on_bad_lines=lambda x: x + ) expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) tm.assert_frame_equal(result, expected) @@ -466,8 +467,14 @@ def test_index_col_false_and_header_none(python_parser_only): 0.5,0.03 0.1,0.2,0.3,2 """ - with tm.assert_produces_warning(ParserWarning, match="Length of header"): - result = parser.read_csv(StringIO(data), sep=",", header=None, index_col=False) + result = parser.read_csv_check_warnings( + ParserWarning, + "Length of header", + StringIO(data), + sep=",", + header=None, + index_col=False, + ) expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]}) tm.assert_frame_equal(result, expected) @@ -476,7 +483,8 @@ def test_header_int_do_not_infer_multiindex_names_on_different_line(python_parse # GH#46569 parser = python_parser_only data = StringIO("a\na,b\nc,d,e\nf,g,h") - with tm.assert_produces_warning(ParserWarning, match="Length of header"): - result = parser.read_csv(data, engine="python", index_col=False) + result = parser.read_csv_check_warnings( + ParserWarning, "Length of header", data, engine="python", index_col=False + ) expected = DataFrame({"a": ["a", "c", "f"]}) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index cb34cb6678a67..db85cb416145f 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -258,7 +258,11 @@ def test_round_trip_frame_string(self, df): # Two character separator is not supported in to_clipboard # Test that 
multi-character separators are not silently passed def test_excel_sep_warning(self, df): - with tm.assert_produces_warning(): + with tm.assert_produces_warning( + UserWarning, + match="to_clipboard in excel mode requires a single character separator.", + check_stacklevel=False, + ): df.to_clipboard(excel=True, sep=r"\t") # Separator is ignored when excel=False and should produce a warning diff --git a/pandas/tests/plotting/frame/test_hist_box_by.py b/pandas/tests/plotting/frame/test_hist_box_by.py index fe39c3d441396..e568016c858fd 100644 --- a/pandas/tests/plotting/frame/test_hist_box_by.py +++ b/pandas/tests/plotting/frame/test_hist_box_by.py @@ -164,7 +164,7 @@ def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df): def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): # GH 15079 # _check_plot_works adds an ax so catch warning. see GH #13188 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( hist_df.plot.hist, column=column, by=by, layout=layout ) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 9ca8a71ed1897..9112d5cb3368f 100644 --- a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -61,23 +61,23 @@ def test_boxplot_legacy1(self): _check_plot_works(df.boxplot, return_type="dict") _check_plot_works(df.boxplot, column=["one", "two"], return_type="dict") # _check_plot_works adds an ax so catch warning. see GH #13188 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.boxplot, column=["one", "two"], by="indic") _check_plot_works(df.boxplot, column="one", by=["indic", "indic2"]) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.boxplot, by="indic") - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.boxplot, by=["indic", "indic2"]) _check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict") _check_plot_works(df.boxplot, notch=1, return_type="dict") - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.boxplot, by="indic", notch=1) def test_boxplot_legacy2(self): df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"]) df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) df["Y"] = Series(["A"] * 10) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.boxplot, by="X") # When ax is supplied and required number of axes is 1, @@ -330,7 +330,7 @@ def test_boxplot_group_xlabel_ylabel(self, vert): class TestDataFrameGroupByPlots(TestPlotBase): def test_boxplot_legacy1(self, hist_df): grouped = hist_df.groupby(by="gender") - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(grouped.boxplot, return_type="axes") self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2)) axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") @@ -341,7 +341,7 @@ def test_boxplot_legacy2(self): tuples = zip(string.ascii_letters[:10], range(10)) df = 
DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples)) grouped = df.groupby(level=1) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(grouped.boxplot, return_type="axes") self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3)) @@ -352,7 +352,7 @@ def test_boxplot_legacy3(self): tuples = zip(string.ascii_letters[:10], range(10)) df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples)) grouped = df.unstack(level=1).groupby(level=0, axis=1) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(grouped.boxplot, return_type="axes") self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2)) axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") @@ -437,20 +437,20 @@ def test_grouped_box_layout(self, hist_df): df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1)) # _check_plot_works adds an ax so catch warning. see GH #13188 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): box = _check_plot_works( df.groupby("gender").boxplot, column="height", return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): box = _check_plot_works( df.groupby("category").boxplot, column="height", return_type="dict" ) self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2)) # GH 6769 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): box = _check_plot_works( df.groupby("classroom").boxplot, column="height", return_type="dict" ) @@ -473,7 +473,7 @@ def test_grouped_box_layout(self, hist_df): ) self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): box = _check_plot_works( df.groupby("category").boxplot, column="height", @@ -481,7 +481,7 @@ def test_grouped_box_layout(self, hist_df): return_type="dict", ) self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): box = _check_plot_works( df.groupby("category").boxplot, column="height", diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 0955e7808f3f6..9c11d589716fe 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -31,9 +31,9 @@ def test_hist_legacy(self, ts): _check_plot_works(ts.hist, grid=False) _check_plot_works(ts.hist, figsize=(8, 10)) # _check_plot_works adds an ax so catch warning. 
see GH #13188 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(ts.hist, by=ts.index.month) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(ts.hist, by=ts.index.month, bins=5) fig, ax = self.plt.subplots(1, 1) @@ -74,31 +74,31 @@ def test_hist_layout_with_by(self, hist_df): # _check_plot_works adds an `ax` kwarg to the method call # so we get a warning about an axis being cleared, even # though we don't explicing pass one, see GH #13188 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1)) self._check_axes_shape(axes, axes_num=2, layout=(3, 1)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1)) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1)) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1)) self._check_axes_shape(axes, axes_num=4, layout=(3, 2)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4)) self._check_axes_shape(axes, axes_num=4, layout=(1, 4)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) @@ -235,7 +235,7 @@ class TestDataFramePlots(TestPlotBase): def test_hist_df_legacy(self, hist_df): from matplotlib.patches import Rectangle - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(hist_df.hist) # make sure layout is handled @@ -248,7 +248,7 @@ def test_hist_df_legacy(self, hist_df): dtype=np.int64, ) ) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.hist, grid=False) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) assert not axes[1, 1].get_visible() @@ -267,20 +267,20 @@ def test_hist_df_legacy(self, hist_df): dtype=np.int64, ) ) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.hist, layout=(4, 2)) self._check_axes_shape(axes, axes_num=6, layout=(4, 2)) # make sure sharex, sharey is handled - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.hist, sharex=True, sharey=True) # handle figsize 
arg - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.hist, figsize=(8, 10)) # check bins argument - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): _check_plot_works(df.hist, bins=5) # make sure xlabelsize and xrot are handled @@ -659,13 +659,13 @@ def test_grouped_hist_layout(self, hist_df): with pytest.raises(ValueError, match=msg): df.hist(column="height", by=df.category, layout=(-1, -1)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( df.hist, column="height", by=df.gender, layout=(2, 1) ) self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( df.hist, column="height", by=df.gender, layout=(2, -1) ) @@ -682,14 +682,14 @@ def test_grouped_hist_layout(self, hist_df): tm.close() # GH 6769 - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( df.hist, column="height", by="classroom", layout=(2, 2) ) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) # without column - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works(df.hist, by="classroom") self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index ab8e64be648d4..7dd9b78bab1cd 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -107,7 +107,7 @@ def test_scatter_matrix_axis(self, pass_axis): df = DataFrame(np.random.randn(100, 3)) # we are plotting multiples on a sub-plot - with tm.assert_produces_warning(UserWarning, raise_on_extra_warnings=True): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( scatter_matrix, filterwarnings="always", @@ -125,7 +125,7 @@ def test_scatter_matrix_axis(self, pass_axis): df[0] = (df[0] - 2) / 3 # we are plotting multiples on a sub-plot - with tm.assert_produces_warning(UserWarning): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): axes = _check_plot_works( scatter_matrix, filterwarnings="always", diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 35174d92b4125..3edabc7c089e1 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -235,7 +235,7 @@ def __init__(self, index, warn: bool = True) -> None: "warn is deprecated (and never implemented) and " "will be removed in a future version.", FutureWarning, - stacklevel=3, + stacklevel=find_stack_level(inspect.currentframe()), ) self.warn = warn diff --git a/scripts/list_future_warnings.sh b/scripts/list_future_warnings.sh index 121f4f5a92abb..dc3d0b59b618b 100755 --- a/scripts/list_future_warnings.sh +++ b/scripts/list_future_warnings.sh @@ -6,7 +6,7 @@ # This is useful to detect features that have been deprecated, and should be # removed from the code. 
For example, if a line of code contains: # -# warning.warn('Method deprecated', FutureWarning, stacklevel=2) +# warning.warn('Method deprecated', FutureWarning, stacklevel=find_stack_level(inspect.currentframe())) # # Which is released in Pandas 0.20.0, then it is expected that the method # is removed before releasing Pandas 0.24.0, including the warning. If it diff --git a/scripts/validate_min_versions_in_sync.py b/scripts/validate_min_versions_in_sync.py index 1b937673672f8..cb6a204094bf5 100755 --- a/scripts/validate_min_versions_in_sync.py +++ b/scripts/validate_min_versions_in_sync.py @@ -26,9 +26,11 @@ # in pre-commit environment sys.path.append("pandas/compat") sys.path.append("pandas/util") +import _exceptions import version sys.modules["pandas.util.version"] = version +sys.modules["pandas.util._exceptions"] = _exceptions import _optional
- [ ] closes #47919
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
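A minimal sketch of the pattern this diff rolls out across the codebase: each `warnings.warn` call gains a runtime-computed `stacklevel` so the warning is attributed to the user's call site rather than a pandas-internal frame. It assumes a pandas development build where `pandas.util._exceptions.find_stack_level` accepts the current frame, exactly as the calls in the diff do (later versions drop the argument); `deprecated_helper` is a made-up name for illustration.

```python
# Sketch of the stacklevel pattern applied throughout this PR. Assumes a
# pandas dev build where find_stack_level takes a frame, as in the diff;
# the helper below is hypothetical.
import inspect
import warnings

from pandas.util._exceptions import find_stack_level


def deprecated_helper() -> None:
    # Before this PR: a bare warnings.warn(msg) or a hard-coded
    # stacklevel=2, which points at the wrong frame whenever the
    # internal call depth changes.
    warnings.warn(
        "deprecated_helper is deprecated",
        FutureWarning,
        stacklevel=find_stack_level(inspect.currentframe()),
    )


deprecated_helper()  # the warning names this line, not a library frame
```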
https://api.github.com/repos/pandas-dev/pandas/pulls/47998
2022-08-07T09:51:14Z
2022-08-17T22:00:08Z
2022-08-17T22:00:08Z
2022-08-17T22:00:08Z
Added improvements in to_datetime Error reporting message
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index fe4078f611f7e..55057ff628619 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -595,7 +595,7 @@ cpdef array_to_datetime( continue elif is_raise: raise ValueError( - f"time data {val} doesn't match format specified" + f"time data \"{val}\" at position {i} doesn't match format specified" ) return values, tz_out @@ -611,7 +611,7 @@ cpdef array_to_datetime( if is_coerce: iresult[i] = NPY_NAT continue - raise TypeError("invalid string coercion to datetime") + raise TypeError(f"invalid string coercion to datetime for \"{val}\" at position {i}") if tz is not None: seen_datetime_offset = True diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 1d5bbf87090eb..8c223020c4012 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -286,7 +286,7 @@ def parse_datetime_string( datetime dt if not _does_string_look_like_datetime(date_string): - raise ValueError('Given date string not likely a datetime.') + raise ValueError(f'Given date string {date_string} not likely a datetime') if does_string_look_like_time(date_string): # use current datetime as default, not pass _DEFAULT_DATETIME @@ -320,7 +320,7 @@ def parse_datetime_string( except TypeError: # following may be raised from dateutil # TypeError: 'NoneType' object is not iterable - raise ValueError('Given date string not likely a datetime.') + raise ValueError(f'Given date string {date_string} not likely a datetime') return dt @@ -396,7 +396,7 @@ cdef parse_datetime_string_with_reso( int out_tzoffset if not _does_string_look_like_datetime(date_string): - raise ValueError('Given date string not likely a datetime.') + raise ValueError(f'Given date string {date_string} not likely a datetime') parsed, reso = _parse_delimited_date(date_string, dayfirst) if parsed is not None: diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 20d6b9e77a034..c9e28f8249c1b 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -301,7 +301,7 @@ def test_invalid_arguments(self): with pytest.raises(ValueError, match=msg): Period(month=1) - msg = "Given date string not likely a datetime" + msg = "Given date string -2000 not likely a datetime" with pytest.raises(ValueError, match=msg): Period("-2000", "A") msg = "day is out of range for month" diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 4050817b39b88..7e698b7a6b83d 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -951,7 +951,7 @@ def test_datetime_invalid_scalar(self, value, format, infer): msg = ( "is a bad directive in format|" "second must be in 0..59|" - "Given date string not likely a datetime" + f"Given date string {value} not likely a datetime" ) with pytest.raises(ValueError, match=msg): to_datetime( @@ -1003,7 +1003,7 @@ def test_datetime_invalid_index(self, values, format, infer): msg = ( "is a bad directive in format|" - "Given date string not likely a datetime|" + f"Given date string {values[0]} not likely a datetime|" "second must be in 0..59" ) with pytest.raises(ValueError, match=msg): @@ -2220,7 +2220,7 @@ def test_day_not_in_month_raise(self, cache): @pytest.mark.parametrize("arg", ["2015-02-29", "2015-02-32", "2015-04-31"]) def test_day_not_in_month_raise_value(self, cache, arg): - msg = f"time data {arg} doesn't match format specified" + msg = f'time data 
"{arg}" at position 0 doesn\'t match format specified' with pytest.raises(ValueError, match=msg): to_datetime(arg, errors="raise", format="%Y-%m-%d", cache=cache)
- closes https://github.com/pandas-dev/pandas/issues/16757
- Changes description: for issue https://github.com/pandas-dev/pandas/issues/16757, more detail was added to the error messages raised by `array_to_datetime` in `pandas/_libs/tslib.pyx`, naming the offending value and its position.
- Examples of changed error messages:

| Code | Old Error (on 1.4.3) | New Error |
| ------------- | ------------- | ------------- |
| pd.to_datetime(pd.Series(['2016-01-01', 'unparseable']), errors='raise') | dateutil.parser._parser.ParserError: Unknown string format: unparseable | dateutil.parser._parser.ParserError: Unknown string format: unparseable present at position 1 |
| pd.to_datetime(pd.Series(['2016-01-01', 'unparseable']), format='%Y-%m-%d', errors='raise') | ValueError: time data unparseable doesn't match format specified | ValueError: time data "unparseable" at position 1 doesn't match format specified |
| pd.to_datetime(pd.Series(['2016-01-01', '12']), errors='raise') | ValueError: Given date string not likely a datetime. | ValueError: Given date string 12 not likely a datetime present at position 1 |
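As a quick check, here is a short sketch reproducing the new message; it follows `test_day_not_in_month_raise_value` from the diff and assumes a pandas build that includes this change.

```python
# Reproduces the improved message, mirroring test_day_not_in_month_raise_value
# from this diff; requires a build that includes the change.
import pandas as pd

try:
    pd.to_datetime("2015-02-29", errors="raise", format="%Y-%m-%d")
except ValueError as err:
    # Old:  time data 2015-02-29 doesn't match format specified
    # New:  time data "2015-02-29" at position 0 doesn't match format specified
    print(err)
```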
https://api.github.com/repos/pandas-dev/pandas/pulls/47995
2022-08-06T21:56:14Z
2022-08-16T18:35:24Z
2022-08-16T18:35:24Z
2022-08-31T18:22:03Z
BUG: compare returning all nan columns when comparing ea and np dtypes
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bdf811f6a8f6a..790aa5cefb29f 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -908,6 +908,8 @@ Indexing - Bug in :meth:`DataFrame.mask` with ``inplace=True`` and ``ExtensionDtype`` columns incorrectly raising (:issue:`45577`) - Bug in getting a column from a DataFrame with an object-dtype row index with datetime-like values: the resulting Series now preserves the exact object-dtype Index from the parent DataFrame (:issue:`42950`) - Bug in :meth:`DataFrame.__getattribute__` raising ``AttributeError`` if columns have ``"string"`` dtype (:issue:`46185`) +- Bug in :meth:`DataFrame.compare` returning all ``NaN`` column when comparing extension array dtype and numpy dtype (:issue:`44014`) +- Bug in :meth:`DataFrame.where` setting wrong values with ``"boolean"`` mask for numpy dtype (:issue:`44014`) - Bug in indexing on a :class:`DatetimeIndex` with a ``np.str_`` key incorrectly raising (:issue:`45580`) - Bug in :meth:`CategoricalIndex.get_indexer` when index contains ``NaN`` values, resulting in elements that are in target but not present in the index to be mapped to the index of the NaN element, instead of -1 (:issue:`45361`) - Bug in setting large integer values into :class:`Series` with ``float32`` or ``float16`` dtype incorrectly altering these values instead of coercing to ``float64`` dtype (:issue:`45844`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a70f9b7b20d5a..9dd3c780aa59b 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1062,6 +1062,8 @@ def where(self, other, cond, _downcast="infer") -> list[Block]: transpose = self.ndim == 2 + cond = extract_bool_array(cond) + # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index aa55a7c91d0e6..fba8978d2128c 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -1046,3 +1046,13 @@ def test_where_mask_deprecated(frame_or_series): with tm.assert_produces_warning(FutureWarning): obj.mask(mask, -1, errors="raise") + + +def test_where_producing_ea_cond_for_np_dtype(): + # GH#44014 + df = DataFrame({"a": Series([1, pd.NA, 2], dtype="Int64"), "b": [1, 2, 3]}) + result = df.where(lambda x: x.apply(lambda y: y > 1, axis=1)) + expected = DataFrame( + {"a": Series([pd.NA, pd.NA, 2], dtype="Int64"), "b": [np.nan, 2, 3]} + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_compare.py b/pandas/tests/frame/methods/test_compare.py index 609242db453ba..55e5db9603fe5 100644 --- a/pandas/tests/frame/methods/test_compare.py +++ b/pandas/tests/frame/methods/test_compare.py @@ -236,3 +236,19 @@ def test_invalid_input_result_names(result_names): ), ): df1.compare(df2, result_names=result_names) + + +def test_compare_ea_and_np_dtype(): + # GH#44014 + df1 = pd.DataFrame({"a": [4.0, 4], "b": [1.0, 2]}) + df2 = pd.DataFrame({"a": pd.Series([1, pd.NA], dtype="Int64"), "b": [1.0, 2]}) + result = df1.compare(df2, keep_shape=True) + expected = pd.DataFrame( + { + ("a", "self"): [4.0, np.nan], + ("a", "other"): pd.Series([1, pd.NA], dtype="Int64"), + ("b", "self"): np.nan, + ("b", "other"): np.nan, + } + ) + tm.assert_frame_equal(result, expected)
- [x] closes #44014
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
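A sketch of the fixed behaviour, mirroring `test_compare_ea_and_np_dtype` from the diff; the printed result assumes a build that includes the fix.

```python
# Mirrors test_compare_ea_and_np_dtype from this diff; with the fix, the
# comparison keeps the differing values instead of returning all-NaN columns.
import pandas as pd

df1 = pd.DataFrame({"a": [4.0, 4], "b": [1.0, 2]})
df2 = pd.DataFrame({"a": pd.Series([1, pd.NA], dtype="Int64"), "b": [1.0, 2]})

result = df1.compare(df2, keep_shape=True)
# ("a", "self") -> [4.0, NaN], ("a", "other") -> [1, <NA>] as Int64;
# column "b" is identical, so both of its result columns stay NaN.
print(result)
```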
https://api.github.com/repos/pandas-dev/pandas/pulls/47994
2022-08-06T21:41:39Z
2022-08-08T21:25:56Z
2022-08-08T21:25:56Z
2022-08-08T21:29:06Z
ENH: DataFrame.drop copy kwd
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index a0d33cb513722..64b367f2ed36d 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -293,6 +293,7 @@ Other enhancements - :meth:`RangeIndex.union` now can return a :class:`RangeIndex` instead of a :class:`Int64Index` if the resulting values are equally spaced (:issue:`47557`, :issue:`43885`) - :meth:`DataFrame.compare` now accepts an argument ``result_names`` to allow the user to specify the result's names of both left and right DataFrame which are being compared. This is by default ``'self'`` and ``'other'`` (:issue:`44354`) - :meth:`Series.add_suffix`, :meth:`DataFrame.add_suffix`, :meth:`Series.add_prefix` and :meth:`DataFrame.add_prefix` support a ``copy`` argument. If ``False``, the underlying data is not copied in the returned object (:issue:`47934`) +- :meth:`DataFrame.drop` with ``axis=1`` can now accept ``copy=False`` to prevent a copy of the underlying data (:issue:`47993`) .. --------------------------------------------------------------------------- .. _whatsnew_150.notable_bug_fixes: diff --git a/pandas/core/base.py b/pandas/core/base.py index f7e6c4434da32..e7bef361252b4 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -221,9 +221,7 @@ def _obj_with_exclusions(self): if len(self.exclusions) > 0: # equivalent to `self.obj.drop(self.exclusions, axis=1) # but this avoids consolidating and making a copy - # TODO: following GH#45287 can we now use .drop directly without - # making a copy? - return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True) + return self.obj.drop(self.exclusions, axis=1, copy=False) else: return self.obj diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 49e5bc24786dd..010a537b45c9b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5096,6 +5096,7 @@ def drop( level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., + copy: bool | lib.NoDefault = ..., ) -> None: ... @@ -5110,6 +5111,7 @@ def drop( level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame: ... @@ -5124,6 +5126,7 @@ def drop( level: Level | None = ..., inplace: bool = ..., errors: IgnoreRaise = ..., + copy: bool | lib.NoDefault = ..., ) -> DataFrame | None: ... @@ -5139,6 +5142,8 @@ def drop( # type: ignore[override] level: Level | None = None, inplace: bool = False, errors: IgnoreRaise = "raise", + *, + copy: bool | lib.NoDefault = lib.no_default, ) -> DataFrame | None: """ Drop specified labels from rows or columns. @@ -5171,6 +5176,10 @@ def drop( # type: ignore[override] errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. + copy : bool, default True + If False and axis == 1, do not make a copy of the underlying data. + + .. versionadded:: 1.5.0 Returns ------- @@ -5285,6 +5294,7 @@ def drop( # type: ignore[override] level=level, inplace=inplace, errors=errors, + copy=copy, ) @overload diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 003fe2571401f..ba03ed8879611 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4376,6 +4376,7 @@ def drop( level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., + copy: bool_t | lib.NoDefault = ..., ) -> None: ... @@ -4390,6 +4391,7 @@ def drop( level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., + copy: bool_t | lib.NoDefault = ..., ) -> NDFrameT: ... 
@@ -4404,6 +4406,7 @@ def drop( level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., + copy: bool_t | lib.NoDefault = ..., ) -> NDFrameT | None: ... @@ -4417,9 +4420,17 @@ def drop( level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", + *, + copy: bool_t | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") + if inplace: + if copy is not lib.no_default: + raise ValueError("Cannot pass both inplace=True and copy") + copy = True + elif copy is lib.no_default: + copy = True if labels is not None: if index is not None or columns is not None: @@ -4437,7 +4448,9 @@ def drop( for axis, labels in axes.items(): if labels is not None: - obj = obj._drop_axis(labels, axis, level=level, errors=errors) + obj = obj._drop_axis( + labels, axis, level=level, errors=errors, only_slice=not copy + ) if inplace: self._update_inplace(obj) diff --git a/pandas/core/series.py b/pandas/core/series.py index 206fcbe05d006..7fe5f9a86594c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4992,7 +4992,8 @@ def reindex(self, *args, **kwargs) -> Series: kwargs.update({"index": index}) return super().reindex(**kwargs) - @overload + # error: Signature of "drop" incompatible with supertype "NDFrame" [override] + @overload # type: ignore[override] def drop( self, labels: IndexLabel = ..., @@ -5141,6 +5142,7 @@ def drop( # type: ignore[override] level=level, inplace=inplace, errors=errors, + copy=lib.no_default, ) @overload diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 50b60f9e06ef1..a4aee6bda679d 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -67,6 +67,25 @@ def test_drop_with_non_unique_datetime_index_and_invalid_keys(): class TestDataFrameDrop: + def test_drop_copy(self): + df = DataFrame( + [[1, 2, 3], [3, 4, 5], [5, 6, 7]], + index=["a", "b", "c"], + columns=["d", "e", "f"], + ) + + msg = "Cannot pass both inplace=True and copy" + with pytest.raises(ValueError, match=msg): + df.drop("d", axis=1, inplace=True, copy=True) + with pytest.raises(ValueError, match=msg): + df.drop("d", axis=1, inplace=True, copy=False) + + res = df.drop("d", axis=1, copy=True) + assert not any(tm.shares_memory(res[c], df[c]) for c in res.columns) + + res = df.drop("d", axis=1, copy=False) + assert all(tm.shares_memory(res[c], df[c]) for c in res.columns) + def test_drop_names(self): df = DataFrame( [[1, 2, 3], [3, 4, 5], [5, 6, 7]],
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
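A minimal usage sketch of the proposed keyword, mirroring `test_drop_copy` added in this PR. It assumes a pandas build that includes this patch — the `copy` keyword on `drop` does not exist without it — and uses `tm.shares_memory` from `pandas._testing`, as the test does.

```python
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame(
    [[1, 2, 3], [3, 4, 5], [5, 6, 7]],
    index=["a", "b", "c"],
    columns=["d", "e", "f"],
)

# copy=True (the effective default) backs the result with new arrays
res = df.drop("d", axis=1, copy=True)
assert not any(tm.shares_memory(res[c], df[c]) for c in res.columns)

# copy=False takes the only_slice path, so the remaining columns are views
res = df.drop("d", axis=1, copy=False)
assert all(tm.shares_memory(res[c], df[c]) for c in res.columns)

# Passing an explicit copy value together with inplace=True raises:
# df.drop("d", axis=1, inplace=True, copy=False)  # ValueError
```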
https://api.github.com/repos/pandas-dev/pandas/pulls/47993
2022-08-06T20:47:30Z
2022-09-14T21:31:19Z
null
2022-09-14T21:31:21Z
WEB: Fix link to PDEP 0001
diff --git a/web/pandas/about/roadmap.md b/web/pandas/about/roadmap.md index 6e922d01518ba..64e730fc42eed 100644 --- a/web/pandas/about/roadmap.md +++ b/web/pandas/about/roadmap.md @@ -17,7 +17,7 @@ tracked on our [issue tracker](https://github.com/pandas-dev/pandas/issues). The roadmap is defined as a set of major enhancement proposals named PDEPs. For more information about PDEPs, and how to submit one, please refer to -[PEDP-1](/pdeps/accepted/0001-puropose-and-guidelines.html). +[PEDP-1](/pdeps/0001-purpose-and-guidelines.html). ## PDEPs
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Ref: https://github.com/pandas-dev/pandas/pull/47938#issuecomment-1207216768
https://api.github.com/repos/pandas-dev/pandas/pulls/47991
2022-08-06T13:45:44Z
2022-08-06T15:36:55Z
2022-08-06T15:36:55Z
2022-08-06T18:49:55Z
Warn when creating `Period` with a string that includes timezone information
diff --git a/doc/source/whatsnew/v1.5.3.rst b/doc/source/whatsnew/v1.5.3.rst index 581d28e10bd67..ab80e21e9adce 100644 --- a/doc/source/whatsnew/v1.5.3.rst +++ b/doc/source/whatsnew/v1.5.3.rst @@ -29,6 +29,7 @@ Bug fixes - Bug in :meth:`.Styler.to_excel` leading to error when unrecognized ``border-style`` (e.g. ``"hair"``) provided to Excel writers (:issue:`48649`) - Bug when chaining several :meth:`.Styler.concat` calls, only the last styler was concatenated (:issue:`49207`) - Fixed bug when instantiating a :class:`DataFrame` subclass inheriting from ``typing.Generic`` that triggered a ``UserWarning`` on python 3.11 (:issue:`49649`) +- :class:`Period` now raises a warning when created with data that contains timezone information. This is necessary because :class:`Period`, :class:`PeriodArray` and :class:`PeriodIndex` do not support timezones and hence drop any timezone information used when creating them. (:issue:`47005`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 86fa965be92c4..23c4629d757f2 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1,3 +1,5 @@ +import warnings + cimport numpy as cnp from cpython.object cimport ( Py_EQ, @@ -2613,6 +2615,13 @@ class Period(_Period): raise ValueError(msg) if ordinal is None: + if dt.tzinfo: + # GH 47005 + warnings.warn( + "The pandas.Period class does not support timezones. " + f"The timezone given in '{value}' will be ignored.", + UserWarning + ) base = freq_to_dtype_code(freq) ordinal = period_ordinal(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index bdaaeb20b3508..06e4f02c576c1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -511,7 +511,11 @@ def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime): ------- lower, upper: pd.Timestamp """ - per = Period(parsed, freq=reso.attr_abbrev) + with warnings.catch_warnings(): + # Period looses tzinfo. We ignore the corresponding warning here, + # and add the lost tzinfo below. 
+ warnings.simplefilter("ignore", UserWarning) + per = Period(parsed, freq=reso.attr_abbrev) start, end = per.start_time, per.end_time # GH 24076 diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 69f46a333503d..06e98f72b7ff6 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -15,6 +15,7 @@ Generator, cast, ) +import warnings from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates @@ -262,7 +263,9 @@ def get_datevalue(date, freq): if isinstance(date, Period): return date.asfreq(freq).ordinal elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): - return Period(date, freq).ordinal + with warnings.catch_warnings(): + warnings.simplefilter("ignore", UserWarning) + return Period(date, freq).ordinal elif ( is_integer(date) or is_float(date) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index fbd6f362bd9e7..dd7f30551be0f 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -1007,7 +1007,7 @@ class TestPeriodArray(SharedTests): index_cls = PeriodIndex array_cls = PeriodArray scalar_type = Period - example_dtype = PeriodIndex([], freq="W").dtype + example_dtype = PeriodIndex([], freq="Q").dtype @pytest.fixture def arr1d(self, period_index): diff --git a/pandas/tests/indexes/datetimes/methods/test_to_period.py b/pandas/tests/indexes/datetimes/methods/test_to_period.py index e8048e63afbf7..99d692750c08d 100644 --- a/pandas/tests/indexes/datetimes/methods/test_to_period.py +++ b/pandas/tests/indexes/datetimes/methods/test_to_period.py @@ -117,11 +117,13 @@ def test_to_period_millisecond(self): ) with tm.assert_produces_warning(UserWarning): - # warning that timezone info will be lost + # GH 21333 - warning that timezone info will be lost period = index.to_period(freq="L") assert 2 == len(period) - assert period[0] == Period("2007-01-01 10:11:12.123Z", "L") - assert period[1] == Period("2007-01-01 10:11:13.789Z", "L") + with tm.assert_produces_warning(UserWarning): + # GH 47005 - warning that timezone info will be lost + assert period[0] == Period("2007-01-01 10:11:12.123Z", "L") + assert period[1] == Period("2007-01-01 10:11:13.789Z", "L") def test_to_period_microsecond(self): index = DatetimeIndex( @@ -132,11 +134,13 @@ def test_to_period_microsecond(self): ) with tm.assert_produces_warning(UserWarning): - # warning that timezone info will be lost + # GH 21333 - warning that timezone info will be lost period = index.to_period(freq="U") assert 2 == len(period) - assert period[0] == Period("2007-01-01 10:11:12.123456Z", "U") - assert period[1] == Period("2007-01-01 10:11:13.789123Z", "U") + with tm.assert_produces_warning(UserWarning): + # GH 47005 - warning that timezone info will be lost + assert period[0] == Period("2007-01-01 10:11:12.123456Z", "U") + assert period[1] == Period("2007-01-01 10:11:13.789123Z", "U") @pytest.mark.parametrize( "tz", diff --git a/pandas/tests/indexes/period/test_period_range.py b/pandas/tests/indexes/period/test_period_range.py index c94ddf57c0ee1..15334ca2e8c53 100644 --- a/pandas/tests/indexes/period/test_period_range.py +++ b/pandas/tests/indexes/period/test_period_range.py @@ -20,7 +20,17 @@ def test_required_arguments(self): with pytest.raises(ValueError, match=msg): period_range("2011-1-1", "2012-1-1", "B") - @pytest.mark.parametrize("freq", ["D", "W", "M", "Q", "A"]) + @pytest.mark.parametrize( + "freq", + [ + 
"D", + # Parsing week strings is not fully supported. See GH 48000. + pytest.param("W", marks=pytest.mark.filterwarnings("ignore:.*timezone")), + "M", + "Q", + "A", + ], + ) def test_construction_from_string(self, freq): # non-empty expected = date_range( @@ -119,3 +129,13 @@ def test_errors(self): msg = "periods must be a number, got foo" with pytest.raises(TypeError, match=msg): period_range(start="2017Q1", periods="foo") + + +def test_range_tz(): + # GH 47005 Time zone should be ignored with warning. + with tm.assert_produces_warning(UserWarning): + pi_tz = period_range( + "2022-01-01 06:00:00+02:00", "2022-01-01 09:00:00+02:00", freq="H" + ) + pi_naive = period_range("2022-01-01 06:00:00", "2022-01-01 09:00:00", freq="H") + tm.assert_index_equal(pi_tz, pi_naive) diff --git a/pandas/tests/indexing/test_datetime.py b/pandas/tests/indexing/test_datetime.py index dc2fe85679181..05d7fe79c1318 100644 --- a/pandas/tests/indexing/test_datetime.py +++ b/pandas/tests/indexing/test_datetime.py @@ -170,3 +170,11 @@ def test_getitem_str_slice_millisecond_resolution(self, frame_or_series): ], ) tm.assert_equal(result, expected) + + +def test_slice_with_datestring_tz(): + # GH 24076 + # GH 16785 + df = DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific")) + sliced = df["2019-01-01 12:00:00+04:00":"2019-01-01 13:00:00+04:00"] + tm.assert_frame_equal(sliced, df) diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index e32708c4402e4..1c69bcd62b4eb 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -263,12 +263,16 @@ def test_with_local_timezone_pytz(self): series = Series(1, index=index) series = series.tz_convert(local_timezone) - result = series.resample("D", kind="period").mean() - - # Create the expected series - # Index is moved back a day with the timezone conversion from UTC to - # Pacific - expected_index = period_range(start=start, end=end, freq="D") - offsets.Day() + # see gh-47005 + with tm.assert_produces_warning(UserWarning): + result = series.resample("D", kind="period").mean() + + # Create the expected series + # Index is moved back a day with the timezone conversion from UTC to + # Pacific + expected_index = ( + period_range(start=start, end=end, freq="D") - offsets.Day() + ) expected = Series(1.0, index=expected_index) tm.assert_series_equal(result, expected) @@ -304,14 +308,16 @@ def test_with_local_timezone_dateutil(self): series = Series(1, index=index) series = series.tz_convert(local_timezone) - result = series.resample("D", kind="period").mean() - - # Create the expected series - # Index is moved back a day with the timezone conversion from UTC to - # Pacific - expected_index = ( - period_range(start=start, end=end, freq="D", name="idx") - offsets.Day() - ) + # see gh-47005 + with tm.assert_produces_warning(UserWarning): + result = series.resample("D", kind="period").mean() + + # Create the expected series + # Index is moved back a day with the timezone conversion from UTC to + # Pacific + expected_index = ( + period_range(start=start, end=end, freq="D", name="idx") - offsets.Day() + ) expected = Series(1.0, index=expected_index) tm.assert_series_equal(result, expected) @@ -504,7 +510,10 @@ def test_resample_tz_localized(self): tm.assert_series_equal(result, expected) # for good measure - result = s.resample("D", kind="period").mean() + # see gh-47005 + with tm.assert_produces_warning(UserWarning): + result = s.resample("D", kind="period").mean() + ex_index 
= period_range("2001-09-20", periods=1, freq="D") expected = Series([1.5], index=ex_index) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 112f23b3b0f16..2b749e2d5f31c 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1555,3 +1555,18 @@ def test_invalid_frequency_error_message(): msg = "Invalid frequency: <WeekOfMonth: week=0, weekday=0>" with pytest.raises(ValueError, match=msg): Period("2012-01-02", freq="WOM-1MON") + + [email protected]( + "val", + [ + ("20220101T123456", "Z"), + ("2012-12-12T06:06:06", "-06:00"), + ], +) +def test_period_with_timezone(val): + # GH 47005 Time zone should be ignored with warning. + with tm.assert_produces_warning(UserWarning): + p_tz = Period("".join(val), freq="s") + p_naive = Period(val[0], freq="s") + assert p_tz == p_naive
- [x] closes #47005 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. -- no new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. ## ~~Note on the new `DateTimeWarning`~~ ~~I added the `DateTimeWarning` for this specific case and the warnings raised in #22549 because the commonly used `UserWarning` is often not appropriate. There are lots of conceivable situations where this warning may be raised due to design choices in Pandas as opposed to user error. `DateTimeWarning` inherits from `UserWarning` to remain backwards compatible. This train of thought applies to much of the datetime functionality in Pandas but that's beyond the scope of this PR.~~
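A condensed repro of the new warning, adapted from `test_period_with_timezone` above; it assumes a pandas build that includes this patch (without it the construction succeeds silently).

```python
import pandas as pd
import pandas._testing as tm

# The offset in the string is discarded with a UserWarning, and the
# resulting Period equals its timezone-naive counterpart.
with tm.assert_produces_warning(UserWarning):
    p_tz = pd.Period("2012-12-12T06:06:06-06:00", freq="s")

p_naive = pd.Period("2012-12-12T06:06:06", freq="s")
assert p_tz == p_naive
```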
https://api.github.com/repos/pandas-dev/pandas/pulls/47990
2022-08-06T13:42:20Z
2023-01-16T19:39:52Z
null
2023-03-06T08:14:31Z
DOC: Add tuple description to allowed inputs for iloc #47799
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index fa1ad7ce3c874..4e242e33627a4 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -155,6 +155,8 @@ def iloc(self) -> _iLocIndexer: DataFrame) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. + - A tuple of row and column indexes. The tuple elements consist of one of the + above inputs, e.g. ``(0, 1)``. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds
- [x] closes #47799 - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
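A small illustration of the tuple form being documented; the frame here is a hypothetical example, but the indexing itself is long-standing `iloc` behavior.

```python
import pandas as pd

df = pd.DataFrame({"d": [1, 2], "e": [3, 4]})

# A tuple pairs a row indexer with a column indexer; each element may be
# any of the single-axis forms listed in the docstring.
assert df.iloc[(0, 1)] == 3        # row 0, column 1 -> scalar
sub = df.iloc[(slice(None), [0])]  # all rows, first column -> DataFrame
assert list(sub.columns) == ["d"]
```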
https://api.github.com/repos/pandas-dev/pandas/pulls/47989
2022-08-06T08:34:49Z
2022-08-08T21:35:57Z
2022-08-08T21:35:57Z
2022-08-08T21:36:04Z
PDEP-0002 Build System Overhaul
diff --git a/web/pandas/pdeps/0002-build-system-overhaul.md b/web/pandas/pdeps/0002-build-system-overhaul.md new file mode 100644 index 0000000000000..678614b818207 --- /dev/null +++ b/web/pandas/pdeps/0002-build-system-overhaul.md @@ -0,0 +1,49 @@ +# PDEP-2: Build System Overhaul + + +- Created: 5 August 2022 +- Status: Under discussion +- Discussion: [#47380](https://github.com/pandas-dev/pandas/pull/47380) +- Author: [Will Ayd](https://github.com/willayd) +- Revision: 1 + +# Abstract + +This PDEP proposes moving the pandas build system away from its legacy setuptools-based implementation to either a CMake or Meson build system. + +# Detailed Description + +Pandas has long used setuptools as its build system, likely due to this historically being the only feasible option. Within setuptools, much of the build system logic ends up getting placed in ``setup.py`` with limited options for extension or customization. Pandas in turn has a monolithic ``setup.py`` with its own quirks. For instance, parallel compilation "works" but only during development (yielding very long source distribution installs) and not without its own [bugs](https://github.com/pandas-dev/pandas/issues/30873). + +As Python packaging has evolved, so too has its ability to leverage other build, packaging and installation systems. [PEP-517](https://peps.python.org/pep-0517/) and [PEP-518](https://peps.python.org/pep-0518/) (which provides more history on setuptools) have standardized how libraries can implement and extend their own build systems. + +Given the opportunity to decouple from a hard setuptools dependency, this PDEP proposes that pandas explore other popular build tools, with the goal of providing a better developer and end-user experience. The two main tools discussed so far are [CMake](https://cmake.org/) and [Meson](https://mesonbuild.com/Python-module.html). + +# CMake + +CMake is a popular build system, particularly in the C++ world. It is the build tool of choice for [Apache Arrow](https://arrow.apache.org/). CMake hearkens back to the early 2000s and has a very large ecosystem of documentation, books and training resources. [Why CMake?](https://cmake.org/cmake/help/book/mastering-cmake/chapter/Why%20CMake.html) in its own documentation provides users with history and motivation for its use. + +A reference implementation of CMake for pandas can be found at [#47380](https://github.com/pandas-dev/pandas/pull/47380). + +# Meson + +Meson is a more recent entry into the build system world. Comparisons to other build systems can be found in the [Meson documentation](https://mesonbuild.com/Comparisons.html). One of the more attractive points of Meson is that its syntax is much more Pythonic in nature than the DSL offered by CMake. Meson is used by SciPy and NumPy, with scikit-learn [likely moving](https://github.com/pandas-dev/pandas/pull/47380#issuecomment-1162817318) to Meson in the future. + +A reference implementation of Meson for pandas can be found [here](https://github.com/lithomas1/pandas/pull/19). + +# Consideration Points + +## Syntax Differences + +Meson has a language heavily influenced by Python. CMake by comparison has its own DSL, with relatively limited support for lists and little to no dict support. For a Python developer, Meson would seem more natural at first glance.
+ +## Popularity + +When looking at [StackOverflow data](https://data.stackexchange.com/stackoverflow/query/1280378/number-of-stackoverflow-quesions-total-and-answered-on-bazel-cmake-meson-and), CMake is far and away the most tagged build system. As of August 5, 2022, the number of tagged answers for popular build systems is: + +|Build System|Answered Questions| +|---|---| +|CMake|18363| +|MSBuild|12351| +|Bazel|2224| +|Meson|295|
CMake POC: #47380; Meson POC: https://github.com/lithomas1/pandas/pull/19
https://api.github.com/repos/pandas-dev/pandas/pulls/47988
2022-08-06T02:30:16Z
2022-08-23T04:27:54Z
null
2023-04-12T20:17:39Z
BUG: iloc.setitem raising NotImplementedError for all null slice with one column df
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bdf811f6a8f6a..8a2a4ea511ec7 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -889,6 +889,8 @@ Indexing - Bug in setting a NA value (``None`` or ``np.nan``) into a :class:`Series` with int-based :class:`IntervalDtype` incorrectly casting to object dtype instead of a float-based :class:`IntervalDtype` (:issue:`45568`) - Bug in indexing setting values into an ``ExtensionDtype`` column with ``df.iloc[:, i] = values`` with ``values`` having the same dtype as ``df.iloc[:, i]`` incorrectly inserting a new array instead of setting in-place (:issue:`33457`) - Bug in :meth:`Series.__setitem__` with a non-integer :class:`Index` when using an integer key to set a value that cannot be set inplace where a ``ValueError`` was raised instead of casting to a common dtype (:issue:`45070`) +- Bug in :meth:`DataFrame.loc` raising ``NotImplementedError`` when setting value into one column :class:`DataFrame` with all null slice as column indexer (:issue:`45469`) +- Bug in :meth:`DataFrame.loc` not casting ``None`` to ``NA`` when setting value a list into :class:`DataFrame` (:issue:`47987`) - Bug in :meth:`Series.__setitem__` when setting incompatible values into a ``PeriodDtype`` or ``IntervalDtype`` :class:`Series` raising when indexing with a boolean mask but coercing when indexing with otherwise-equivalent indexers; these now consistently coerce, along with :meth:`Series.mask` and :meth:`Series.where` (:issue:`45768`) - Bug in :meth:`DataFrame.where` with multiple columns with datetime-like dtypes failing to downcast results consistent with other dtypes (:issue:`45837`) - Bug in :func:`isin` upcasting to ``float64`` with unsigned integer dtype and list-like argument without a dtype (:issue:`46485`) diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index c68ffec600c8a..e2bc08c03ed3c 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -409,6 +409,8 @@ def __setitem__(self, key, value): if len(value) and not lib.is_string_array(value, skipna=True): raise ValueError("Must provide strings.") + value[isna(value)] = libmissing.NA + super().__setitem__(key, value) def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a70f9b7b20d5a..852e75619df89 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1719,6 +1719,10 @@ def _unwrap_setitem_indexer(self, indexer): elif lib.is_integer(indexer[1]) and indexer[1] == 0: # reached via setitem_single_block passing the whole indexer indexer = indexer[0] + + elif com.is_null_slice(indexer[1]): + indexer = indexer[0] + else: raise NotImplementedError( "This should not be reached. 
Please report a bug at " diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 24d7365b52159..34668193ed132 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1340,6 +1340,22 @@ def test_loc_internals_not_updated_correctly(self): ) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("val", [None, [None], pd.NA, [pd.NA]]) + def test_iloc_setitem_string_list_na(self, val): + # GH#45469 + df = DataFrame({"a": ["a", "b", "c"]}, dtype="string") + df.iloc[[0], :] = val + expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string") + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize("val", [None, pd.NA]) + def test_iloc_setitem_string_na(self, val): + # GH#45469 + df = DataFrame({"a": ["a", "b", "c"]}, dtype="string") + df.iloc[0, :] = val + expected = DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string") + tm.assert_frame_equal(df, expected) + class TestDataFrameIndexingUInt64: def test_setitem(self, uint64_frame):
- [x] closes #45469 (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
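A condensed version of the new regression tests; it assumes a pandas build that includes this fix, since the same assignment on a one-column string frame previously raised `NotImplementedError`.

```python
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": ["a", "b", "c"]}, dtype="string")

# An all-null-slice column indexer now works on a one-column frame, and
# None is coerced to the dtype's NA value instead of being stored as-is.
df.iloc[0, :] = None

expected = pd.DataFrame({"a": [pd.NA, "b", "c"]}, dtype="string")
tm.assert_frame_equal(df, expected)
```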
https://api.github.com/repos/pandas-dev/pandas/pulls/47987
2022-08-05T22:16:27Z
2022-08-08T21:46:14Z
2022-08-08T21:46:14Z
2022-08-08T21:48:56Z
BUG: to_string using wrong na_rep for ea dtype in multiindex
diff --git a/doc/source/whatsnew/v1.5.0.rst b/doc/source/whatsnew/v1.5.0.rst index bdf811f6a8f6a..ecec18c8b8242 100644 --- a/doc/source/whatsnew/v1.5.0.rst +++ b/doc/source/whatsnew/v1.5.0.rst @@ -958,6 +958,7 @@ I/O - Bug in :func:`read_sas` with certain types of compressed SAS7BDAT files (:issue:`35545`) - Bug in :func:`read_excel` not forward filling :class:`MultiIndex` when no names were given (:issue:`47487`) - Bug in :func:`read_sas` returned ``None`` rather than an empty DataFrame for SAS7BDAT files with zero rows (:issue:`18198`) +- Bug in :meth:`DataFrame.to_string` using wrong missing value with extension arrays in :class:`MultiIndex` (:issue:`47986`) - Bug in :class:`StataWriter` where value labels were always written with default encoding (:issue:`46750`) - Bug in :class:`StataWriterUTF8` where some valid characters were removed from variable names (:issue:`47276`) - Bug in :meth:`DataFrame.to_excel` when writing an empty dataframe with :class:`MultiIndex` (:issue:`19543`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 60f727f54b621..5a9b1e6943608 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -54,6 +54,7 @@ ensure_int64, ensure_platform_int, is_categorical_dtype, + is_extension_array_dtype, is_hashable, is_integer, is_iterator, @@ -1370,7 +1371,7 @@ def format( stringified_levels = [] for lev, level_codes in zip(self.levels, self.codes): - na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type) + na = na_rep if na_rep is not None else _get_na_rep(lev.dtype) if len(lev) > 0: @@ -3889,6 +3890,11 @@ def sparsify_labels(label_list, start: int = 0, sentinel=""): def _get_na_rep(dtype) -> str: + if is_extension_array_dtype(dtype): + return f"{dtype.na_value}" + else: + dtype = dtype.type + return {np.datetime64: "NaT", np.timedelta64: "NaT"}.get(dtype, "NaN") diff --git a/pandas/tests/frame/test_repr_info.py b/pandas/tests/frame/test_repr_info.py index f42660b297cb0..86c8e36cb7bd4 100644 --- a/pandas/tests/frame/test_repr_info.py +++ b/pandas/tests/frame/test_repr_info.py @@ -9,6 +9,7 @@ import pytest from pandas import ( + NA, Categorical, DataFrame, MultiIndex, @@ -342,6 +343,19 @@ def test_frame_to_string_with_periodindex(self): # it works! frame.to_string() + def test_to_string_ea_na_in_multiindex(self): + # GH#47986 + df = DataFrame( + {"a": [1, 2]}, + index=MultiIndex.from_arrays([Series([NA, 1], dtype="Int64")]), + ) + + result = df.to_string() + expected = """ a +<NA> 1 +1 2""" + assert result == expected + def test_datetime64tz_slice_non_truncate(self): # GH 30263 df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")})
- [ ] closes #xxxx (Replace xxxx with the Github issue number) - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
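A short repro of the corrected rendering, following `test_to_string_ea_na_in_multiindex` above; it assumes a pandas build that includes this patch.

```python
import pandas as pd

df = pd.DataFrame(
    {"a": [1, 2]},
    index=pd.MultiIndex.from_arrays([pd.Series([pd.NA, 1], dtype="Int64")]),
)

# The masked entry in the Int64 level now renders as the dtype's own
# na_value, "<NA>", rather than the numpy-style "NaN".
print(df.to_string())
```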
https://api.github.com/repos/pandas-dev/pandas/pulls/47986
2022-08-05T20:59:24Z
2022-08-08T21:49:56Z
2022-08-08T21:49:56Z
2022-08-08T21:56:55Z
Backport PR #47581 on branch 1.4.x (BUG: DataFrame.loc not aligning rhs df for single block case)
diff --git a/doc/source/whatsnew/v1.4.4.rst b/doc/source/whatsnew/v1.4.4.rst index dce8fb60ecdd6..b4224b5b210e0 100644 --- a/doc/source/whatsnew/v1.4.4.rst +++ b/doc/source/whatsnew/v1.4.4.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression in :func:`concat` materializing :class:`Index` during sorting even if :class:`Index` was already sorted (:issue:`47501`) - Fixed regression in :meth:`DataFrame.loc` not updating the cache correctly after values were set (:issue:`47867`) +- Fixed regression in :meth:`DataFrame.loc` not aligning index in some cases when setting a :class:`DataFrame` (:issue:`47578`) - Fixed regression in setting ``None`` or non-string value into a ``string``-dtype Series using a mask (:issue:`47628`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5b25f5be01d29..fcd822988de20 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4527,8 +4527,8 @@ def _sanitize_column(self, value) -> ArrayLike: """ self._ensure_valid_index(value) - # We should never get here with DataFrame value - if isinstance(value, Series): + # We can get there through loc single_block_path + if isinstance(value, (DataFrame, Series)): return _reindex_for_setitem(value, self.index) if is_list_like(value): diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index a7af569e397eb..4dc9cbdb7b34d 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1323,6 +1323,15 @@ def test_loc_internals_not_updated_correctly(self): ) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("val", ["x", 1]) + @pytest.mark.parametrize("idxr", ["a", ["a"]]) + def test_loc_setitem_rhs_frame(self, idxr, val): + # GH#47578 + df = DataFrame({"a": [1, 2]}) + df.loc[:, "a"] = DataFrame({"a": [val, 11]}, index=[1, 2]) + expected = DataFrame({"a": [np.nan, val]}) + tm.assert_frame_equal(df, expected) + class TestDataFrameIndexingUInt64: def test_setitem(self, uint64_frame): diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 673d347917832..c438dc78ce397 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -699,6 +699,13 @@ def test_setitem_npmatrix_2d(self): tm.assert_frame_equal(df, expected) + def test_setitem_rhs_dataframe(self): + # GH#47578 + df = DataFrame({"a": [1, 2]}) + df["a"] = DataFrame({"a": [10, 11]}, index=[1, 2]) + expected = DataFrame({"a": [np.nan, 10]}) + tm.assert_frame_equal(df, expected) + class TestSetitemTZAwareValues: @pytest.fixture
Backport PR #47581. cc @simonjayhawkins. Will put up a PR for the whatsnew later.
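A compact repro of the regression covered by this backport, adapted from the new tests; on affected 1.4.x builds the right-hand frame was not aligned before being set.

```python
import numpy as np
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": [1, 2]})

# The rhs frame is aligned on the index first: label 0 has no match, so
# it becomes NaN; label 1 takes the rhs value 10; rhs label 2 is dropped.
df.loc[:, "a"] = pd.DataFrame({"a": [10, 11]}, index=[1, 2])

expected = pd.DataFrame({"a": [np.nan, 10]})
tm.assert_frame_equal(df, expected)
```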
https://api.github.com/repos/pandas-dev/pandas/pulls/47984
2022-08-05T19:14:33Z
2022-08-08T13:04:04Z
2022-08-08T13:04:03Z
2022-08-08T21:29:16Z