Sindbad~EG File Manager

Current Path : /usr/local/lib/python3.12/site-packages/pandas/core/__pycache__/
Current File : /usr/local/lib/python3.12/site-packages/pandas/core/__pycache__/resample.cpython-312.pyc
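
The file shown above is compiled CPython bytecode, so its contents cannot be read directly in a text viewer. Below is a minimal, hedged sketch of how such a .pyc can be inspected from Python; it assumes the standard 16-byte header used by CPython 3.7+ and reuses the path shown above.

import dis
import marshal

PYC_PATH = (
    "/usr/local/lib/python3.12/site-packages/pandas/core/"
    "__pycache__/resample.cpython-312.pyc"
)

with open(PYC_PATH, "rb") as f:
    f.read(16)              # skip magic number, flags, and source mtime/size fields
    code = marshal.load(f)  # the module's top-level code object

print(code.co_names[:20])   # names referenced at module level (imports, classes, helpers)
dis.dis(code)               # disassemble the module body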
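For orientation, here is a short usage sketch of the resample API this module implements (its structure is summarized further below). The data, frequencies, and fill choices are illustrative and loosely follow the docstring examples that survive in the dump.

import pandas as pd

ser = pd.Series(
    [1, 2, 3, 4, 5],
    index=pd.date_range("2013-01-01", periods=5, freq="s"),
)

# Downsampling: obj.resample(...) returns a Resampler; aggregation reduces each bin.
print(ser.resample("2s").sum())
print(ser.resample("2s").agg(["sum", "mean", "max"]))

# Upsampling: new timestamps are introduced, then filled or interpolated.
print(ser.resample("500ms").ffill(limit=1))
print(ser.resample("500ms").interpolate("linear"))

# Bin-edge control handled by TimeGrouper: which edge is closed, which edge labels
# the bin, and an offset shifting where the bins are anchored.
print(ser.resample("2s", closed="right", label="right", offset="1s").sum())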

�

MٜgZu���UddlmZddlZddlmZddlmZmZmZm	Z	m
Z
mZddlZddl
ZddlmZddlmZmZmZmZmZmZmZddlmZddlmZdd	lmZdd
l m!Z!ddl"m#Z#m$Z$m%Z%ddl&m'Z'm(Z(dd
l)m*Z*ddl+m,Z,m-Z-ddl.m/cm0Z1ddl2m3Z3m4Z4ddl5m6Z6ddl7m8Z8m9Z9ddl:m/cm;Z<ddl=m>Z>m?Z?ddl@mAZAddlBmCZCmDZDmEZEmFZFmGZGddlHmIZIddlJmKZKddlLmMZMddlNmOZOddlPmQZQmRZRddlSmTZTmUZUddlVmWZWmXZXddlYmZZZm[Z[ddl\m]Z]m^Z^er(ddl_m`Z`ddlmaZambZbmcZcmdZdmeZemfZfmgZgmhZhmiZimjZjmkZkdd llmmZmmnZniZod!epd"<Gd#�d$eCe8�ZqGd%�d&e8e9�ZrGd'�d(eq�ZsGd)�d*eres�ZtGd+�d,es�ZuGd-�d.ereu�ZvGd/�d0es�ZwGd1�d2erew�Zxd@dAd3�Zyeqj�ey_z						dB							dCd4�Z{Gd5�d6eI�Z|	dD									dEd7�Z}			dF															dGd8�Z~			dF													dHd9�Z										dId:�Z�				dJ															dKd;�Z�				dL					dMd<�Z�dNd=�Z�dOd>�Z�								dPd?�Z�y)Q�)�annotationsN)�dedent)�
TYPE_CHECKING�Callable�Literal�cast�final�
no_type_check)�lib)�
BaseOffset�IncompatibleFrequency�NaT�Period�	Timedelta�	Timestamp�	to_offset)�freq_to_period_freqstr)�NDFrameT)�function��AbstractMethodError)�Appender�Substitution�doc)�find_stack_level�rewrite_warning)�
ArrowDtype)�ABCDataFrame�	ABCSeries)�ResamplerWindowApply�warn_alias_replacement)�ArrowExtensionArray)�PandasObject�SelectionMixin)�NDFrame�_shared_docs)�
SeriesGroupBy)�BaseGroupBy�GroupBy�_apply_groupings_depr�_pipe_template�get_groupby)�Grouper)�
BinGrouper)�
MultiIndex)�Index)�
DatetimeIndex�
date_range)�PeriodIndex�period_range)�TimedeltaIndex�timedelta_range)�is_subperiod�is_superperiod)�Day�Tick)�Hashable)�AnyArrayLike�Axis�AxisInt�	Frequency�
IndexLabel�InterpolateOptions�T�TimedeltaConvertibleTypes�TimeGrouperOrigin�TimestampConvertibleTypes�npt)�	DataFrame�Serieszdict[str, str]�_shared_docs_kwargsc	���eZdZUdZded<ded<ded<e�Zded	<ehd
��Zgd�Z			dAdd
dd�													dBd�Z
edCd��ZedDd��Z
eedEd���ZdFd�Zd�Zed��Zeedd��ee�				dG�fd����Zed�Zed�Zeeedeedd� �dHd!���ZeZeZed"��Zd#�Z dIdJd$�Z!dHdKd%�Z"d&�Z#e	dL			dMd'��Z$d(�Z%edHdJd)��Z&edHdJd*��Z'edHdJd+��Z(edHdJd,��Z)e	dNdd
dd-d
e*jVd.�									dOd/��Z,edHd0��Z-e		dP			dQd1��Z.e		dP			dQd2��Z/e		dP			dQd3��Z0e		dP			dQd4��Z1eee2jf�			dR					dSd5���Z3eee2jh�			dR					dSd6���Z4eee2jj�dTdUd7���Z5e	dT	dUd8��Z6e		dV			dWd9��Z7e		dV			dWd:��Z8eee2jr�		dV			dWd;���Z9eee2jt�d<���Z:eee;jx�d=���Z<eee2jz�d>���Z=eee2j|�d?���Z>edXdYd@��Z?�xZ@S)Z�	Resamplera/
    Class for resampling datetimelike data, a groupby-like operation.
    See aggregate, transform, and apply functions on this object.

    It's easiest to use obj.resample(...) to use Resampler.

    Parameters
    ----------
    obj : Series or DataFrame
    groupby : TimeGrouper
    axis : int, default 0
    kind : str or None
        'period', 'timestamp' to override default index treatment

    Returns
    -------
    a Resampler of the appropriate type

    Notes
    -----
    After resampling, see aggregate, apply, and transform functions.
    r.�_grouper�TimeGrouper�_timegrouperz,DatetimeIndex | TimedeltaIndex | PeriodIndex�binnerzfrozenset[Hashable]�
exclusions>�ax�obj�_indexer)�freq�axis�closed�label�
convention�kind�origin�offsetrNFT)�
group_keys�	selection�include_groupsc���||_d|_d|_|j|�|_||_||_d|_||_|jj|j|�d|��\|_|_|_
|j�\|_|_||_|jj$�&t'|jj$g�|_yt'�|_y)NT)�sort�	gpr_index)rN�keysr`�_get_axis_numberrUrYr\�as_indexr^�_set_grouper�_convert_objrRrQrS�_get_binnerrOrL�
_selection�key�	frozensetrP)	�selfrR�timegrouperrUrYrar\r]r^s	         �?/usr/local/lib/python3.12/site-packages/pandas/core/resample.py�__init__zResampler.__init__�s���(�����	���	��(�(��.��	���	�$�����
�,���+/�+<�+<�+I�+I����c�"���,J�,
�(���$�'�4�=�&*�%5�%5�%7�"���T�]�#������ � �,�'��):�):�)>�)>�(?�@�D�O�'�k�D�O�c����fd��jD�}t��j�ddj|��d�S)z@
        Provide a nice str repr of our rolling object.
        c3��K�|]6}t�j|d��|�dt�j|������8y�w)N�=)�getattrrN)�.0�krks  �rm�	<genexpr>z$Resampler.__str__.<locals>.<genexpr>�sG�����
�%���t�(�(�!�T�2�>��c��7�4�,�,�a�0�1�2�%�s�<?z [z, �])�_attributes�type�__name__�join)rk�attrss` rm�__str__zResampler.__str__�sB���

��%�%�
��
�t�*�%�%�&�b����5�)9�(:�!�<�<roc���||jvrtj||�S||jvrt	|j
|�S||jvr||Stj||�S�N)�_internal_names_set�object�__getattribute__rxrsrNrR)rk�attrs  rm�__getattr__zResampler.__getattr__�sm���4�+�+�+��*�*�4��6�6��4�#�#�#��4�,�,�d�3�3��4�8�8����:���&�&�t�T�2�2roc��|jduxr2|jjduxs|jjduS)zP
        Is the resampling from a DataFrame column or MultiIndex level.
        N)rNri�level�rks rm�_from_selectionzResampler._from_selection�sG��� � ��,�
����!�!��-�T��1B�1B�1H�1H�PT�1T�	
roc�"�|j�S)z�
        Provide any conversions for the object in order to correctly handle.

        Parameters
        ----------
        obj : Series or DataFrame

        Returns
        -------
        Series or DataFrame
        )�_consolidate)rkrRs  rmrfzResampler._convert_obj�s�����!�!roc��t|��rrr�s rm�_get_binner_for_timezResampler._get_binner_for_time��
��!�$�'�'roc��|j�\}}}t|�t|�k(sJ�t|||j��}||fS)zk
        Create the BinGrouper, assume that self.set_grouper(obj)
        has already been called.
        )�indexer)r��lenr.rS)rkrO�bins�	binlabels�bin_groupers     rmrgzResampler._get_binner�sL��#'�";�";�"=����i��4�y�C�	�N�*�*�*� ��y�$�-�-�H���{�"�"roa�
    >>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
    ...                   index=pd.date_range('2012-08-02', periods=4))
    >>> df
                A
    2012-08-02  1
    2012-08-03  2
    2012-08-04  3
    2012-08-05  4

    To get the difference between each 2-day period's maximum and minimum
    value in one pass, you can do

    >>> df.resample('2D').pipe(lambda x: x.max() - x.min())
                A
    2012-08-02  1
    2012-08-04  1)�klass�examplesc�*��t�|�|g|��i|��Sr)�super�pipe)rk�func�args�kwargs�	__class__s    �rmr�zResampler.pipes���8�w�|�D�2�4�2�6�2�2roa[
    See Also
    --------
    DataFrame.groupby.aggregate : Aggregate using callable, string, dict,
        or list of string/callables.
    DataFrame.resample.transform : Transforms the Series on each group
        based on the given function.
    DataFrame.aggregate: Aggregate using one or more
        operations over the specified axis.
    a�
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4, 5],
    ...               index=pd.date_range('20130101', periods=5, freq='s'))
    >>> s
    2013-01-01 00:00:00    1
    2013-01-01 00:00:01    2
    2013-01-01 00:00:02    3
    2013-01-01 00:00:03    4
    2013-01-01 00:00:04    5
    Freq: s, dtype: int64

    >>> r = s.resample('2s')

    >>> r.agg("sum")
    2013-01-01 00:00:00    3
    2013-01-01 00:00:02    7
    2013-01-01 00:00:04    5
    Freq: 2s, dtype: int64

    >>> r.agg(['sum', 'mean', 'max'])
                         sum  mean  max
    2013-01-01 00:00:00    3   1.5    2
    2013-01-01 00:00:02    7   3.5    4
    2013-01-01 00:00:04    5   5.0    5

    >>> r.agg({'result': lambda x: x.mean() / x.std(),
    ...        'total': "sum"})
                           result  total
    2013-01-01 00:00:00  2.121320      3
    2013-01-01 00:00:02  4.949747      7
    2013-01-01 00:00:04       NaN      5

    >>> r.agg(average="mean", total="sum")
                             average  total
    2013-01-01 00:00:00      1.5      3
    2013-01-01 00:00:02      3.5      7
    2013-01-01 00:00:04      5.0      5
    �	aggregaterG�)�see_alsor�r�rUc�t�t||||��j�}|�|}|j|g|��i|��}|S)N)r�r�)r �agg�_groupby_and_aggregate)rkr�r�r��result�hows      rmr�zResampler.aggregateWsH��&�d�D�t�F�K�O�O�Q���>��C�0�T�0�0��F�t�F�v�F�F��
roc�t�|jj|j�j|g|��i|��S)aw
        Call function producing a like-indexed Series on each group.

        Return a Series with the transformed values.

        Parameters
        ----------
        arg : function
            To apply to each group. Should return a Series with the same index.

        Returns
        -------
        Series

        Examples
        --------
        >>> s = pd.Series([1, 2],
        ...               index=pd.date_range('20180101',
        ...                                   periods=2,
        ...                                   freq='1h'))
        >>> s
        2018-01-01 00:00:00    1
        2018-01-01 01:00:00    2
        Freq: h, dtype: int64

        >>> resampled = s.resample('15min')
        >>> resampled.transform(lambda x: (x - x.mean()) / x.std())
        2018-01-01 00:00:00   NaN
        2018-01-01 01:00:00   NaN
        Freq: h, dtype: float64
        )�
_selected_obj�groupbyrN�	transform)rk�argr�r�s    rmr�zResampler.transformjsE��BG�t�!�!�)�)�$�*;�*;�<�F�F��
��
� �
�	
roc��t|��rr)rk�fr�s   rm�_downsamplezResampler._downsample�r�roc��t|��rr)rkr��limit�
fill_values    rm�	_upsamplezResampler._upsample�r�roc���|j}|�%|j}|�||}n|jdk(sJ�|dk(r|jdk(sJ�t|d||j|j
��}|S)�
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        N���by�grouperrUr\)rLrR�ndimr,rUr\)rkrir��subsetr��groupeds      rm�_gotitemzResampler._gotitem�sz���-�-���>��X�X�F��������{�{�a�'�'�'��1�9��;�;�!�#�#�#���t�W�4�9�9����
���roc������|j}|j}t|d||j|j��}	t��r���fd�}|j
|�}n|j�g���i���}|j|�S#ttf$rt|�g���d|ji���}Y�>t$r5}	dt|	�vrn�t|�g���d|ji���}Yd}	~	�vd}	~	wwxYw)zA
        Re-evaluate the obj with a groupby aggregation.
        Nr�c����|g���i���Sr�)�xr�r�r�s ���rm�<lambda>z2Resampler._groupby_and_aggregate.<locals>.<lambda>�s����Q�!8��!8��!8ror^zMust produce aggregated value)rL�_obj_with_exclusionsr,rUr\�callabler��AttributeError�KeyError�_applyr^�
ValueError�str�_wrap_result)
rkr�r�r�r�rRr�r�r��errs
 ```      rmr�z Resampler._groupby_and_aggregate�s����-�-���'�'����D�'��	�	�d�o�o�
�� 	���}�9�� �*�*�4�0��*��*�*�3�@��@��@��6� � ��(�(��5��)�
	�����#��48�4G�4G��KQ��F��	�.�#�c�(�:�������#��48�4G�4G��KQ��F��	�s�:B�+C6�9C6�+C1�1C6c�,�|j||||��S)zG
        Return the correct class for resampling with groupby.
        )r�ri�parentr^)�_resampler_for_grouping)rkr�rir^s    rm�_get_resampler_for_groupingz%Resampler._get_resampler_for_grouping�s%���+�+���T�.�,�
�	
roc��|j}t|t�r]t|�dk(rOt|jt
�s5|j
t|jdd|j��d��}t|t�r|j�|j|_t|t�rG|jr;t|jdd|j��|_t|dd�|_|jj�4|jj!|jj�|_|S)z/
        Potentially wrap any results.
        rN�rTT)�append�name)rR�
isinstancerr��indexr3�	set_index�_asfreq_compatrTrrhr��emptyrsrN�_arrow_dtype�astype)rkr�rRs   rmr�zResampler._wrap_result�s���
�h�h���v�|�,��F��q� ��v�|�|�[�9��%�%��s�y�y��!�}�4�9�9�=�d�&��F��f�i�(�T�_�_�-H��/�/�F�K��f�i�(�V�\�\�)�#�)�)�B�Q�-�d�i�i�H�F�L�!�#�v�t�4�F�K����)�)�5�!�<�<�.�.�t�/@�/@�/M�/M�N�F�L��
roc�(�|jd|��S)a�
        Forward fill the values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        An upsampled Series.

        See Also
        --------
        Series.fillna: Fill NA/NaN values using the specified method.
        DataFrame.fillna: Fill NA/NaN values using the specified method.

        Examples
        --------
        Here we only create a ``Series``.

        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64

        Example for ``ffill`` with downsampling (we have fewer dates after resampling):

        >>> ser.resample('MS').ffill()
        2023-01-01    1
        2023-02-01    3
        Freq: MS, dtype: int64

        Example for ``ffill`` with upsampling (fill the new dates with
        the previous value):

        >>> ser.resample('W').ffill()
        2023-01-01    1
        2023-01-08    1
        2023-01-15    2
        2023-01-22    2
        2023-01-29    2
        2023-02-05    3
        2023-02-12    3
        2023-02-19    4
        Freq: W-SUN, dtype: int64

        With upsampling and limiting (only fill the first new date with the
        previous value):

        >>> ser.resample('W').ffill(limit=1)
        2023-01-01    1.0
        2023-01-08    1.0
        2023-01-15    2.0
        2023-01-22    2.0
        2023-01-29    NaN
        2023-02-05    3.0
        2023-02-12    NaN
        2023-02-19    4.0
        Freq: W-SUN, dtype: float64
        �ffill�r��r��rkr�s  rmr�zResampler.ffill	s��F�~�~�g�U�~�3�3roc�(�|jd|��S)a�
        Resample by using the nearest value.

        When resampling data, missing values may appear (e.g., when the
        resampling frequency is higher than the original frequency).
        The `nearest` method will replace ``NaN`` values that appeared in
        the resampled data with the value from the nearest member of the
        sequence, based on the index value.
        Missing values that existed in the original data will not be modified.
        If `limit` is given, fill only this many values in each direction for
        each of the original values.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series or DataFrame
            An upsampled Series or DataFrame with ``NaN`` values filled with
            their nearest value.

        See Also
        --------
        backfill : Backward fill the new missing values in the resampled data.
        pad : Forward fill ``NaN`` values.

        Examples
        --------
        >>> s = pd.Series([1, 2],
        ...               index=pd.date_range('20180101',
        ...                                   periods=2,
        ...                                   freq='1h'))
        >>> s
        2018-01-01 00:00:00    1
        2018-01-01 01:00:00    2
        Freq: h, dtype: int64

        >>> s.resample('15min').nearest()
        2018-01-01 00:00:00    1
        2018-01-01 00:15:00    1
        2018-01-01 00:30:00    2
        2018-01-01 00:45:00    2
        2018-01-01 01:00:00    2
        Freq: 15min, dtype: int64

        Limit the number of upsampled values imputed by the nearest:

        >>> s.resample('15min').nearest(limit=1)
        2018-01-01 00:00:00    1.0
        2018-01-01 00:15:00    1.0
        2018-01-01 00:30:00    NaN
        2018-01-01 00:45:00    2.0
        2018-01-01 01:00:00    2.0
        Freq: 15min, dtype: float64
        �nearestr�r�r�s  rmr�zResampler.nearestNs��v�~�~�i�u�~�5�5roc�(�|jd|��S)a�

        Backward fill the new missing values in the resampled data.

        In statistics, imputation is the process of replacing missing data with
        substituted values [1]_. When resampling data, missing values may
        appear (e.g., when the resampling frequency is higher than the original
        frequency). The backward fill will replace NaN values that appeared in
        the resampled data with the next value in the original sequence.
        Missing values that existed in the original data will not be modified.

        Parameters
        ----------
        limit : int, optional
            Limit of how many values to fill.

        Returns
        -------
        Series, DataFrame
            An upsampled Series or DataFrame with backward filled NaN values.

        See Also
        --------
        bfill : Alias of backfill.
        fillna : Fill NaN values using the specified method, which can be
            'backfill'.
        nearest : Fill NaN values with nearest neighbor starting from center.
        ffill : Forward fill NaN values.
        Series.fillna : Fill NaN values in the Series using the
            specified method, which can be 'backfill'.
        DataFrame.fillna : Fill NaN values in the DataFrame using the
            specified method, which can be 'backfill'.

        References
        ----------
        .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)

        Examples
        --------
        Resampling a Series:

        >>> s = pd.Series([1, 2, 3],
        ...               index=pd.date_range('20180101', periods=3, freq='h'))
        >>> s
        2018-01-01 00:00:00    1
        2018-01-01 01:00:00    2
        2018-01-01 02:00:00    3
        Freq: h, dtype: int64

        >>> s.resample('30min').bfill()
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    2
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    3
        2018-01-01 02:00:00    3
        Freq: 30min, dtype: int64

        >>> s.resample('15min').bfill(limit=2)
        2018-01-01 00:00:00    1.0
        2018-01-01 00:15:00    NaN
        2018-01-01 00:30:00    2.0
        2018-01-01 00:45:00    2.0
        2018-01-01 01:00:00    2.0
        2018-01-01 01:15:00    NaN
        2018-01-01 01:30:00    3.0
        2018-01-01 01:45:00    3.0
        2018-01-01 02:00:00    3.0
        Freq: 15min, dtype: float64

        Resampling a DataFrame that has missing values:

        >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
        ...                   index=pd.date_range('20180101', periods=3,
        ...                                       freq='h'))
        >>> df
                               a  b
        2018-01-01 00:00:00  2.0  1
        2018-01-01 01:00:00  NaN  3
        2018-01-01 02:00:00  6.0  5

        >>> df.resample('30min').bfill()
                               a  b
        2018-01-01 00:00:00  2.0  1
        2018-01-01 00:30:00  NaN  3
        2018-01-01 01:00:00  NaN  3
        2018-01-01 01:30:00  6.0  5
        2018-01-01 02:00:00  6.0  5

        >>> df.resample('15min').bfill(limit=2)
                               a    b
        2018-01-01 00:00:00  2.0  1.0
        2018-01-01 00:15:00  NaN  NaN
        2018-01-01 00:30:00  NaN  3.0
        2018-01-01 00:45:00  NaN  3.0
        2018-01-01 01:00:00  NaN  3.0
        2018-01-01 01:15:00  NaN  NaN
        2018-01-01 01:30:00  6.0  5.0
        2018-01-01 01:45:00  6.0  5.0
        2018-01-01 02:00:00  6.0  5.0
        �bfillr�r�r�s  rmr�zResampler.bfill�s��J�~�~�g�U�~�3�3roc��tjt|�j�d�tt���|j
||��S)a
        Fill missing values introduced by upsampling.

        In statistics, imputation is the process of replacing missing data with
        substituted values [1]_. When resampling data, missing values may
        appear (e.g., when the resampling frequency is higher than the original
        frequency).

        Missing values that existed in the original data will
        not be modified.

        Parameters
        ----------
        method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
            Method to use for filling holes in resampled data

            * 'pad' or 'ffill': use previous valid observation to fill gap
              (forward fill).
            * 'backfill' or 'bfill': use next valid observation to fill gap.
            * 'nearest': use nearest valid observation to fill gap.

        limit : int, optional
            Limit of how many consecutive missing values to fill.

        Returns
        -------
        Series or DataFrame
            An upsampled Series or DataFrame with missing values filled.

        See Also
        --------
        bfill : Backward fill NaN values in the resampled data.
        ffill : Forward fill NaN values in the resampled data.
        nearest : Fill NaN values in the resampled data
            with nearest neighbor starting from center.
        interpolate : Fill NaN values using interpolation.
        Series.fillna : Fill NaN values in the Series using the
            specified method, which can be 'bfill' and 'ffill'.
        DataFrame.fillna : Fill NaN values in the DataFrame using the
            specified method, which can be 'bfill' and 'ffill'.

        References
        ----------
        .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)

        Examples
        --------
        Resampling a Series:

        >>> s = pd.Series([1, 2, 3],
        ...               index=pd.date_range('20180101', periods=3, freq='h'))
        >>> s
        2018-01-01 00:00:00    1
        2018-01-01 01:00:00    2
        2018-01-01 02:00:00    3
        Freq: h, dtype: int64

        Without filling the missing values you get:

        >>> s.resample("30min").asfreq()
        2018-01-01 00:00:00    1.0
        2018-01-01 00:30:00    NaN
        2018-01-01 01:00:00    2.0
        2018-01-01 01:30:00    NaN
        2018-01-01 02:00:00    3.0
        Freq: 30min, dtype: float64

        >>> s.resample('30min').fillna("backfill")
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    2
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    3
        2018-01-01 02:00:00    3
        Freq: 30min, dtype: int64

        >>> s.resample('15min').fillna("backfill", limit=2)
        2018-01-01 00:00:00    1.0
        2018-01-01 00:15:00    NaN
        2018-01-01 00:30:00    2.0
        2018-01-01 00:45:00    2.0
        2018-01-01 01:00:00    2.0
        2018-01-01 01:15:00    NaN
        2018-01-01 01:30:00    3.0
        2018-01-01 01:45:00    3.0
        2018-01-01 02:00:00    3.0
        Freq: 15min, dtype: float64

        >>> s.resample('30min').fillna("pad")
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    1
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    2
        2018-01-01 02:00:00    3
        Freq: 30min, dtype: int64

        >>> s.resample('30min').fillna("nearest")
        2018-01-01 00:00:00    1
        2018-01-01 00:30:00    2
        2018-01-01 01:00:00    2
        2018-01-01 01:30:00    3
        2018-01-01 02:00:00    3
        Freq: 30min, dtype: int64

        Missing values present before the upsampling are not affected.

        >>> sm = pd.Series([1, None, 3],
        ...                index=pd.date_range('20180101', periods=3, freq='h'))
        >>> sm
        2018-01-01 00:00:00    1.0
        2018-01-01 01:00:00    NaN
        2018-01-01 02:00:00    3.0
        Freq: h, dtype: float64

        >>> sm.resample('30min').fillna('backfill')
        2018-01-01 00:00:00    1.0
        2018-01-01 00:30:00    NaN
        2018-01-01 01:00:00    NaN
        2018-01-01 01:30:00    3.0
        2018-01-01 02:00:00    3.0
        Freq: 30min, dtype: float64

        >>> sm.resample('30min').fillna('pad')
        2018-01-01 00:00:00    1.0
        2018-01-01 00:30:00    1.0
        2018-01-01 01:00:00    NaN
        2018-01-01 01:30:00    NaN
        2018-01-01 02:00:00    3.0
        Freq: 30min, dtype: float64

        >>> sm.resample('30min').fillna('nearest')
        2018-01-01 00:00:00    1.0
        2018-01-01 00:30:00    NaN
        2018-01-01 01:00:00    NaN
        2018-01-01 01:30:00    3.0
        2018-01-01 02:00:00    3.0
        Freq: 30min, dtype: float64

        DataFrame resampling is done column-wise. All the same options are
        available.

        >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
        ...                   index=pd.date_range('20180101', periods=3,
        ...                                       freq='h'))
        >>> df
                               a  b
        2018-01-01 00:00:00  2.0  1
        2018-01-01 01:00:00  NaN  3
        2018-01-01 02:00:00  6.0  5

        >>> df.resample('30min').fillna("bfill")
                               a  b
        2018-01-01 00:00:00  2.0  1
        2018-01-01 00:30:00  NaN  3
        2018-01-01 01:00:00  NaN  3
        2018-01-01 01:30:00  6.0  5
        2018-01-01 02:00:00  6.0  5
        zv.fillna is deprecated and will be removed in a future version. Use obj.ffill(), obj.bfill(), or obj.nearest() instead.��
stacklevelr�)�warnings�warnryrz�
FutureWarningrr�)rk�methodr�s   rm�fillnazResampler.fillna�sK��~	�
�
��D�z�"�"�#�$(�
(�
�'�)�	
��~�~�f�E�~�2�2ro�forward)rUr��inplace�limit_direction�
limit_area�downcastc��|tjusJ�|jd�}	|	jd|||||||d�|��S)a�
        Interpolate values between target timestamps according to different methods.

        The original index is first reindexed to target timestamps
        (see :meth:`core.resample.Resampler.asfreq`),
        then the interpolation of ``NaN`` values via :meth:`DataFrame.interpolate`
        happens.

        Parameters
        ----------
        method : str, default 'linear'
            Interpolation technique to use. One of:

            * 'linear': Ignore the index and treat the values as equally
              spaced. This is the only method supported on MultiIndexes.
            * 'time': Works on daily and higher resolution data to interpolate
              given length of interval.
            * 'index', 'values': use the actual numerical values of the index.
            * 'pad': Fill in NaNs using existing values.
            * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
              'barycentric', 'polynomial': Passed to
              `scipy.interpolate.interp1d`, whereas 'spline' is passed to
              `scipy.interpolate.UnivariateSpline`. These methods use the numerical
              values of the index.  Both 'polynomial' and 'spline' require that
              you also specify an `order` (int), e.g.
              ``df.interpolate(method='polynomial', order=5)``. Note that,
              `slinear` method in Pandas refers to the Scipy first order `spline`
              instead of Pandas first order `spline`.
            * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
              'cubicspline': Wrappers around the SciPy interpolation methods of
              similar names. See `Notes`.
            * 'from_derivatives': Refers to
              `scipy.interpolate.BPoly.from_derivatives`.

        axis : {{0 or 'index', 1 or 'columns', None}}, default None
            Axis to interpolate along. For `Series` this parameter is unused
            and defaults to 0.
        limit : int, optional
            Maximum number of consecutive NaNs to fill. Must be greater than
            0.
        inplace : bool, default False
            Update the data in place if possible.
        limit_direction : {{'forward', 'backward', 'both'}}, Optional
            Consecutive NaNs will be filled in this direction.

            If limit is specified:
                * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
                * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
                  'backwards'.

            If 'limit' is not specified:
                * If 'method' is 'backfill' or 'bfill', the default is 'backward'
                * else the default is 'forward'

                raises ValueError if `limit_direction` is 'forward' or 'both' and
                    method is 'backfill' or 'bfill'.
                raises ValueError if `limit_direction` is 'backward' or 'both' and
                    method is 'pad' or 'ffill'.

        limit_area : {{`None`, 'inside', 'outside'}}, default None
            If limit is specified, consecutive NaNs will be filled with this
            restriction.

            * ``None``: No fill restriction.
            * 'inside': Only fill NaNs surrounded by valid values
              (interpolate).
            * 'outside': Only fill NaNs outside valid values (extrapolate).

        downcast : optional, 'infer' or None, defaults to None
            Downcast dtypes if possible.

            .. deprecated:: 2.1.0

        ``**kwargs`` : optional
            Keyword arguments to pass on to the interpolating function.

        Returns
        -------
        DataFrame or Series
            Interpolated values at the specified freq.

        See Also
        --------
        core.resample.Resampler.asfreq: Return the values at the new freq,
            essentially a reindex.
        DataFrame.interpolate: Fill NaN values using an interpolation method.

        Notes
        -----
        For high-frequent or non-equidistant time-series with timestamps
        the reindexing followed by interpolation may lead to information loss
        as shown in the last example.

        Examples
        --------

        >>> start = "2023-03-01T07:00:00"
        >>> timesteps = pd.date_range(start, periods=5, freq="s")
        >>> series = pd.Series(data=[1, -1, 2, 1, 3], index=timesteps)
        >>> series
        2023-03-01 07:00:00    1
        2023-03-01 07:00:01   -1
        2023-03-01 07:00:02    2
        2023-03-01 07:00:03    1
        2023-03-01 07:00:04    3
        Freq: s, dtype: int64

        Upsample the dataframe to 0.5Hz by providing the period time of 2s.

        >>> series.resample("2s").interpolate("linear")
        2023-03-01 07:00:00    1
        2023-03-01 07:00:02    2
        2023-03-01 07:00:04    3
        Freq: 2s, dtype: int64

        Downsample the dataframe to 2Hz by providing the period time of 500ms.

        >>> series.resample("500ms").interpolate("linear")
        2023-03-01 07:00:00.000    1.0
        2023-03-01 07:00:00.500    0.0
        2023-03-01 07:00:01.000   -1.0
        2023-03-01 07:00:01.500    0.5
        2023-03-01 07:00:02.000    2.0
        2023-03-01 07:00:02.500    1.5
        2023-03-01 07:00:03.000    1.0
        2023-03-01 07:00:03.500    2.0
        2023-03-01 07:00:04.000    3.0
        Freq: 500ms, dtype: float64

        Internal reindexing with ``asfreq()`` prior to interpolation leads to
        an interpolated timeseries on the basis the reindexed timestamps (anchors).
        Since not all datapoints from original series become anchors,
        it can lead to misleading interpolation results as in the following example:

        >>> series.resample("400ms").interpolate("linear")
        2023-03-01 07:00:00.000    1.0
        2023-03-01 07:00:00.400    1.2
        2023-03-01 07:00:00.800    1.4
        2023-03-01 07:00:01.200    1.6
        2023-03-01 07:00:01.600    1.8
        2023-03-01 07:00:02.000    2.0
        2023-03-01 07:00:02.400    2.2
        2023-03-01 07:00:02.800    2.4
        2023-03-01 07:00:03.200    2.6
        2023-03-01 07:00:03.600    2.8
        2023-03-01 07:00:04.000    3.0
        Freq: 400ms, dtype: float64

        Note that the series erroneously increases between two anchors
        ``07:00:00`` and ``07:00:02``.
        �asfreq)r�rUr�r�r�r�r�r�)r�
no_defaultr��interpolate)
rkr�rUr�r�r�r�r�r�r�s
          rmr�zResampler.interpolate�s^��H�3�>�>�)�)�)�����)��!�v�!�!�	
�����+�!��	
��	
�		
roc�(�|jd|��S)a�
        Return the values at the new freq, essentially a reindex.

        Parameters
        ----------
        fill_value : scalar, optional
            Value to use for missing values, applied during upsampling (note
            this does not fill NaNs that already were present).

        Returns
        -------
        DataFrame or Series
            Values at the specified freq.

        See Also
        --------
        Series.asfreq: Convert TimeSeries to specified frequency.
        DataFrame.asfreq: Convert TimeSeries to specified frequency.

        Examples
        --------

        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-31', '2023-02-01', '2023-02-28']))
        >>> ser
        2023-01-01    1
        2023-01-31    2
        2023-02-01    3
        2023-02-28    4
        dtype: int64
        >>> ser.resample('MS').asfreq()
        2023-01-01    1
        2023-02-01    3
        Freq: MS, dtype: int64
        r�)r�r�)rkr�s  rmr�zResampler.asfreqKs��J�~�~�h�:�~�>�>roc��tt|�d||�tjd||�|j	d||��S)a
        Compute sum of group values.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.

            .. versionchanged:: 2.0.0

                numeric_only no longer accepts ``None``.

        min_count : int, default 0
            The required number of valid values to perform the operation. If fewer
            than ``min_count`` non-NA values are present the result will be NA.

        Returns
        -------
        Series or DataFrame
            Computed sum of values within each group.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64
        >>> ser.resample('MS').sum()
        2023-01-01    3
        2023-02-01    7
        Freq: MS, dtype: int64
        �sum��numeric_only�	min_count��maybe_warn_args_and_kwargsry�nv�validate_resampler_funcr��rkr�r�r�r�s     rmr�z
Resampler.sumrsA��X	#�4��:�u�d�F�C�
�"�"�5�$��7�����L�I��V�Vroc��tt|�d||�tjd||�|j	d||��S)a	
        Compute prod of group values.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only float, int, boolean columns.

            .. versionchanged:: 2.0.0

                numeric_only no longer accepts ``None``.

        min_count : int, default 0
            The required number of valid values to perform the operation. If fewer
            than ``min_count`` non-NA values are present the result will be NA.

        Returns
        -------
        Series or DataFrame
            Computed prod of values within each group.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64
        >>> ser.resample('MS').prod()
        2023-01-01    2
        2023-02-01   12
        Freq: MS, dtype: int64
        �prodr�r�r�s     rmr�zResampler.prod�sA��X	#�4��:�v�t�V�D�
�"�"�6�4��8�����\�Y��W�Wroc��tt|�d||�tjd||�|j	d||��S)a
        Compute min value of group.

        Returns
        -------
        Series or DataFrame

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64
        >>> ser.resample('MS').min()
        2023-01-01    1
        2023-02-01    3
        Freq: MS, dtype: int64
        �minr�r�r�s     rmrz
Resampler.min�s@��>	#�4��:�u�d�F�C�
�"�"�5�$��7�����L�I��V�Vroc��tt|�d||�tjd||�|j	d||��S)a
        Compute max value of group.

        Returns
        -------
        Series or DataFrame

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64
        >>> ser.resample('MS').max()
        2023-01-01    2
        2023-02-01    4
        Freq: MS, dtype: int64
        �maxr�r�r�s     rmrz
Resampler.max�s@��<	#�4��:�u�d�F�C�
�"�"�5�$��7�����L�I��V�Vroc��tt|�d||�tjd||�|j	d|||��S)N�first�r�r��skipnar��rkr�r�rr�r�s      rmrzResampler.firstsI��	#�4��:�w��f�E�
�"�"�7�D�&�9�����,�)�F� �
�	
roc��tt|�d||�tjd||�|j	d|||��S)N�lastrr�rs      rmr	zResampler.last'sI��	#�4��:�v�t�V�D�
�"�"�6�4��8�������6� �
�	
roc��tt|�d||�tjd||�|j	d|��S)N�median�r�r��rkr�r�r�s    rmrzResampler.median7s>��	#�4��:�x��v�F�
�"�"�8�T�6�:�����|��D�Droc��tt|�d||�tjd||�|j	d|��S)aX
        Compute mean of groups, excluding missing values.

        Parameters
        ----------
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        DataFrame or Series
            Mean of values within each group.

        Examples
        --------

        >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex(
        ...                 ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15']))
        >>> ser
        2023-01-01    1
        2023-01-15    2
        2023-02-01    3
        2023-02-15    4
        dtype: int64
        >>> ser.resample('MS').mean()
        2023-01-01    1.5
        2023-02-01    3.5
        Freq: MS, dtype: float64
        �meanrr�r
s    rmrzResampler.mean>s?��P	#�4��:�v�t�V�D�
�"�"�6�4��8�����\��B�Broc��tt|�d||�tjd||�|j	d||��S)a�
        Compute standard deviation of groups, excluding missing values.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.
        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        DataFrame or Series
            Standard deviation of values within each group.

        Examples
        --------

        >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
        ...                 index=pd.DatetimeIndex(['2023-01-01',
        ...                                         '2023-01-10',
        ...                                         '2023-01-15',
        ...                                         '2023-02-01',
        ...                                         '2023-02-10',
        ...                                         '2023-02-15']))
        >>> ser.resample('MS').std()
        2023-01-01    1.000000
        2023-02-01    2.645751
        Freq: MS, dtype: float64
        �std��ddofr�r��rkrr�r�r�s     rmrz
Resampler.stdjsA��X	#�4��:�u�d�F�C�
�"�"�5�$��7�����D�|��L�Lroc��tt|�d||�tjd||�|j	d||��S)a�
        Compute variance of groups, excluding missing values.

        Parameters
        ----------
        ddof : int, default 1
            Degrees of freedom.

        numeric_only : bool, default False
            Include only `float`, `int` or `boolean` data.

            .. versionadded:: 1.5.0

            .. versionchanged:: 2.0.0

                numeric_only now defaults to ``False``.

        Returns
        -------
        DataFrame or Series
            Variance of values within each group.

        Examples
        --------

        >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
        ...                 index=pd.DatetimeIndex(['2023-01-01',
        ...                                         '2023-01-10',
        ...                                         '2023-01-15',
        ...                                         '2023-02-01',
        ...                                         '2023-02-10',
        ...                                         '2023-02-15']))
        >>> ser.resample('MS').var()
        2023-01-01    1.0
        2023-02-01    7.0
        Freq: MS, dtype: float64

        >>> ser.resample('MS').var(ddof=0)
        2023-01-01    0.666667
        2023-02-01    4.666667
        Freq: MS, dtype: float64
        �varrr�rs     rmrz
Resampler.var�sA��d	#�4��:�u�d�F�C�
�"�"�5�$��7�����D�|��L�Lroc��tt|�d||�tjd||�|j	d||��S)N�semrr�rs     rmrz
Resampler.sem�s@��	#�4��:�u�d�F�C�
�"�"�5�$��7�����D�|��L�Lroc��tt|�d||�tjd||�|j}|j
}t
|�dk(r�|j�}t|j|j�|_	|jdk(r'|j�}|jgd�d��}|Stj|j gd�g�}|j|d��}|S|j#d�S)N�ohlcrr�)�open�high�low�close�rU)r�ryr�r�rQr�r��copyr�r�rTr��to_frame�reindexr/�from_product�columnsr�)rkr�r�rQrR�mis      rmrzResampler.ohlc�s���	#�4��:�v�t�V�D�
�"�"�6�4��8�
�W�W���'�'���r�7�a�<��(�(�*�C�&�s�y�y�$�)�)�<�C�I��x�x�1�}��l�l�n���k�k�"B��k�K���J�	 �,�,��[�[�"B�C����k�k�"�1�k�-���J�����'�'roc��tt|�d||�tjd||�|j	d�S)N�nuniquer�)rkr�r�s   rmr'zResampler.nunique�s9��	#�4��:�y�$��G�
�"�"�9�d�F�;����	�*�*roc�J�|jd�}t|t�r|js|j	d��}t|j�sNddlm}|jjdk(r|jj}nd}|g|jd|��}|S)	N�sizeT)�future_stackr)rHr��int64�r��dtyper�)
r�r�rr��stackr�rQ�pandasrHr�r�r�r�)rkr�rHr�s    rmr)zResampler.sizes����!�!�&�)���f�l�+�F�L�L��\�\�t�\�4�F��4�7�7�|�%��!�!�&�&�!�+��)�)�.�.�����B�f�l�l�'��M�F��
roc�N�|jd�}t|j�s~|jjdk(r?t|j�g|jd|jj��}|Sddlm	}|g|j|jd��}|S)N�countr�r+r,r)rG)r�r$r-)r�r�rQr�r�ryr�r�r/rGr$)rkr�rGs   rmr1zResampler.counts����!�!�'�*���4�7�7�|��!�!�&�&�!�+�1��d�0�0�1��f�l�l�'��@R�@R�@W�@W����
�
-�"��f�l�l�F�N�N�'����
roc�*�|jdd|i|��S)a�
        Return value at the given quantile.

        Parameters
        ----------
        q : float or array-like, default 0.5 (50% quantile)

        Returns
        -------
        DataFrame or Series
            Quantile of values within each group.

        See Also
        --------
        Series.quantile
            Return a series, where the index is q and the values are the quantiles.
        DataFrame.quantile
            Return a DataFrame, where the columns are the columns of self,
            and the values are the quantiles.
        DataFrameGroupBy.quantile
            Return a DataFrame, where the columns are groupby columns,
            and the values are its quantiles.

        Examples
        --------

        >>> ser = pd.Series([1, 3, 2, 4, 3, 8],
        ...                 index=pd.DatetimeIndex(['2023-01-01',
        ...                                         '2023-01-10',
        ...                                         '2023-01-15',
        ...                                         '2023-02-01',
        ...                                         '2023-02-10',
        ...                                         '2023-02-15']))
        >>> ser.resample('MS').quantile()
        2023-01-01    2.0
        2023-02-01    4.0
        Freq: MS, dtype: float64

        >>> ser.resample('MS').quantile(.25)
        2023-01-01    1.5
        2023-02-01    3.5
        Freq: MS, dtype: float64
        �q)�quantile)r�)rkr3r�s   rmr4zResampler.quantile*s ��Z �t���:�a�:�6�:�:ro)rN)rRr%rlrMrUr=rar0r\�boolr^r5�return�None)r6r�)r�r�)r6r5�rRrr6r)r�z/Callable[..., T] | tuple[Callable[..., T], str]r6rBr�NN�r��
int | None)r��int�T)r�r)r^r5)�linear)
r�rArUr=r�r;r�r5r�z&Literal['forward', 'backward', 'both'])Fr)r�r5r�r<)FrT)r�r5r�r<rr5�F)r�r5)r�F)rr<r�r5)g�?)r3z"float | list[float] | AnyArrayLike)Arz�
__module__�__qualname__�__doc__�__annotations__rjrP�setr�rxrnr	r}r��propertyr�rfr�rgrrr+r�r�_agg_see_also_doc�_agg_examples_docrr&r�r��applyr�r�r�r�r�r�r�r�r�r�r�rr�r�r�r�r�rrr)rr	rrrrrrr'r'r)r1r4�
__classcell__�r�s@rmrKrKys�����.����8�8�&/�k�J�#�1��7�8��	�K��
�*�!��#�*�
�*�!�*��	*��*��*��*�
�*�>�	=��	=��3��3��
�
���
�"�(��#��#������(�n��3�=�3�

�3��)��,3��		����'	�)��V���[�!�"�"��
�������C��E�
�"
��"
�H(�(��8/)�b�<@�
��
�59�
��
��8�B4��B4�H�:6��:6�x�d4��d4�L�e3��e3�N�&.�n
�� ��BK�����n
�"�n
��	n
�
�n
��
n
�@�n
��n
�`�$?��$?�L�#��-W��-W��-W��-W�^�#��-X��-X��-X��-X�^�#�� W�� W�� W�� W�D�#��W��W��W��W�B������#���	
��
��
��	
���
�������#���	
��
��
��	
���
�������E���E�
�#�)C��)C��)C�V��"�-M��-M��-M��-M�^��"�3M��3M��3M��3M�j�������"�	M��	M��	M���	M�������(���(�4���	�	��+� ��+�����������$���������� �,;��,;rorKc��eZdZUdZded<dZded<ded<d	ed
<dddd�									dd
�Zed��ZeZ	eZ
eZedd��Z
y)�
_GroupByMixinz)
    Provide the groupby facilities.
    z	list[str]rxN�IndexLabel | Nonerhr)�_groupbyrMrNF)rir]r^c	��t|t�sJt|���t|t�sJt|���|jD]}t||t
||���||_|j|_||_	||_
tj|j�|_|j|_
|j|_||_yr)r�r)ryrKrx�setattrrsrhrOrirNr rNrQrRr^)rkr�r�rir]r^r�s       rmrnz_GroupByMixin.__init__ds����'�7�+�:�T�'�]�:�+��&�)�,�:�d�6�l�:�,��$�$�D��D�$���� 5�6�%�#����m�m��������
� �I�I�f�&9�&9�:����)�)����:�:���,��roc����������fd�}t�j|�j��}�j|�S)z�
        Dispatch to _upsample; we are stripping all of the _upsample kwargs and
        performing the original function call on the grouped object.
        c�����j|�j�j��}t�t�rt|��di���S|j�g���i���S)N)rlrar�)�_resampler_clsrNrQr�r�rsrH)r�r�r�r�rks ����rmr�z"_GroupByMixin._apply.<locals>.func�sb����#�#�A�4�3D�3D�PT�PW�PW�#�X�A��!�S�!�$�w�q�!�}�.�v�.�.��1�7�7�1�.�t�.�v�.�.ro)r^)r�rNr^r�)rkr�r�r�r�r�s````  rmr�z_GroupByMixin._apply�s3���	/���
�
�t�D�<O�<O�P��� � ��(�(roc��|�%|j}|�||}n|jdk(sJ�	t|t�r5|j|vr'|j�|j|j�|j|}|j||�}t|�|tt|�|��}|S#t$r|j}Y�MwxYw)r�r�)r�r�r])rRr�r��listrir�rN�
IndexError�_infer_selectionryrrK)rkrir�r�r�r]�new_rss       rmr�z_GroupByMixin._gotitem�s����>��X�X�F��������{�{�a�'�'�'�	$��#�t�$�����)<����AU��
�
�4�8�8�$��m�m�C�(�G��)�)�#�v�6�	���d����	�4�(��
��
�
���	$��m�m�G�	$�s�AB3�3C�
C)
r�rKr�r)r]rMr^r5r6r7r)rzr@rArBrCrhrnr
r�r�r�r�r	r�r�rormrLrLZs������$(�J�!�(�����
�'+�$�-��-��	-�%�
-��-�
�-�@�)��)�"�I��K�#��
�$��$rorLc�V��eZdZUded<ed��Zd�Zd�Zd�Zd	d
d�Z	�fd�Z
�xZS)�DatetimeIndexResamplerr1rQc��tSr)�DatetimeIndexResamplerGroupbyr�s rmr�z.DatetimeIndexResampler._resampler_for_grouping�s��,�,roc��|jdk(r%|jj|j�S|jj	|j�S)N�period)rYrN�_get_time_period_binsrQ�_get_time_binsr�s rmr�z+DatetimeIndexResampler._get_binner_for_time�sD���9�9�� ��$�$�:�:�4�7�7�C�C�� � �/�/����8�8roc�r�|}tj|�xs|}||k7r
t|||�|j}|j}t|�s�|j
�}|jj|j�|_|jj|jk(s'J|jj|jf��|S|j�|j�=t|jj�t|�kDr|�|j�S|jdk(r-|j|j�j |fi|��}n@|j"j|j�j |fi|��j"}|j%|�S)��
        Downsample the cython defined function.

        Parameters
        ----------
        how : string / cython mapped function
        **kwargs : kw args passed to how function
        r)�com�get_cython_funcr!rQr�r�r r��
_with_freqrT�
inferred_freqrLr�r�rUr�r�rBr�)rkr�r��orig_howrQrRr�s       rmr�z"DatetimeIndexResampler._downsample�s_�����!�!�#�&�-�#���s�?�"�4��3�7�
�W�W���'�'���2�w��(�(�*�C��	�	�,�,�T�Y�Y�7�C�I��9�9�>�>�T�Y�Y�.�K���������0K�K�.��J��W�W�
 �B�$4�$4�$@��D�M�M�+�+�,�s�2�w�6����;�;�=� ��9�9��>�9�S�[�[����/�9�9�#�H��H�F�<�S�U�U�]�]�4�=�=�1�;�;�C�J�6�J�L�L�F�� � ��(�(roc�<�|jdk(r|dd}|S|dd}|S)z|
        Adjust our binner when upsampling.

        The range of a new index should not be outside specified range
        �rightr�N���)rV�rkrOs  rm�_adjust_binner_for_upsamplez2DatetimeIndexResampler._adjust_binner_for_upsample�s4���;�;�'�!��A�B�Z�F��
��C�R�[�F��
roc���|jrtd��|jrtd��|j}|j
}|j}|j|�}|�Qt|j�|jk(r/t|�t|�k(r|j�}||_
n|dk(rd}|j||||��}|j|�S)a�
        Parameters
        ----------
        method : string {'backfill', 'bfill', 'pad',
            'ffill', 'asfreq'} method for upsampling
        limit : int, default None
            Maximum size gap to fill when reindexing
        fill_value : scalar, default None
            Value to use for missing values

        See Also
        --------
        .fillna: Fill NA/NaN values using the specified method.

        zaxis must be 0zvUpsampling from level= or on= selection is not supported, use .set_index(...) to explicitly set index to datetime-likeNr�)r�r�r�)rU�AssertionErrorr�r�rQr�rOrlrrfrTr�r r�r"r�)	rkr�r�r�rQrRrO�	res_indexr�s	         rmr�z DatetimeIndexResampler._upsample	s��� �9�9� �!1�2�2�����;��
��W�W��� � �������4�4�V�<�	�
�M��"�*�*�+�t�y�y�8��C��C�	�N�*��X�X�Z�F�$�F�L���!����[�[��&��*�!��F�� � ��(�(roc���t�|�|�}|jdk(r�t|jt
�s�t|jt�r}t|jjdt
�sT|jjdj|j�}|jj|d��|_|S|jj|j�|_|S)Nr^rj)r�)r�r�rYr�r�r3r/�levels�	to_periodrT�
set_levels)rkr��	new_levelr�s   �rmr�z#DatetimeIndexResampler._wrap_result8s������%�f�-���9�9�� ��F�L�L�+�)N��&�,�,�
�3�!�&�,�,�"5�"5�b�"9�;�G� &��� 3� 3�B� 7� A� A�$�)�)� L�I�#)�<�<�#:�#:�9�B�#:�#O�F�L��
� &�|�|�5�5�d�i�i�@����
ror9r:)rzr@rArCrEr�r�r�rlr�r�rIrJs@rmrZrZ�s;�����
�-��-�9�,)�\
�-)�^
�
rorZc� �eZdZdZed��Zy)r\z9
    Provides a resample of a groupby implementation
    c��tSr)rZr�s rmrSz,DatetimeIndexResamplerGroupby._resampler_clsQs��%�%roN�rzr@rArBrErSr�rormr\r\Js����&��&ror\c�^��eZdZUded<ed��Z�fd�Zd�fd�Z�fd�Zd	d
�fd�
Z	�xZ
S)�PeriodIndexResamplerr3rQc�V�tjdtt���tS)NzgResampling a groupby with a PeriodIndex is deprecated. Cast to DatetimeIndex before resampling instead.r�)r�r�r�r�PeriodIndexResamplerGroupbyr�s rmr�z,PeriodIndexResampler._resampler_for_grouping[s$���
�
�
?��'�)�		
�+�*roc���|jdk(rt�|�	�S|jj	|j
�S)N�	timestamp)rYr�r�rN�_get_period_binsrQ)rkr�s �rmr�z)PeriodIndexResampler._get_binner_for_timees8����9�9��#��7�/�1�1�� � �1�1�$�'�'�:�:roc���t�|�|�}|jr
d}t|��|jdk(r|j|j��}|S)Nz�Resampling from level= or on= selection with a PeriodIndex is not currently supported, use .set_index(...) to explicitly set indexr}�r�)r�rfr��NotImplementedErrorrY�to_timestamprX)rkrR�msgr�s   �rmrfz!PeriodIndexResampler._convert_objjs\����g�"�3�'�����>�
�
&�c�*�*��9�9��#��"�"�t���"�7�C��
roc�D��|jdk(rt�|�|fi|��S|}tj|�xs|}||k7r
t|||�|j}t|j|j�r|j|fi|��St|j|j�r&|dk(r|j|�S|j�S|j|jk(r|j�Std|j�d|j�d���)rbr}rz
Frequency z cannot be resampled to z&, as they are not sub or super periods)
rYr�r�rcrdr!rQr7rTr�r8r�r
)rkr�r�rgrQr�s     �rmr�z PeriodIndexResampler._downsample|s����9�9��#��7�&�s�5�f�5�5����!�!�#�&�-�#���s�?�"�4��3�7�
�W�W���������+�.�4�.�.�s�=�f�=�=�
�B�G�G�T�Y�Y�
/��f�}��2�2�3�7�7��;�;�=� �
�W�W��	�	�
!��;�;�=� �#�����	�!9�$�)�)��E3�
3�
�	
roc�f��|jdk(rt�
|�	|||��S|j}|j}|j
}|j
|j|j��}|dk(rd}|j|||��}t||||j��}	|j|	�S)a�
        Parameters
        ----------
        method : {'backfill', 'bfill', 'pad', 'ffill'}
            Method for upsampling.
        limit : int, default None
            Maximum size gap to fill when reindexing.
        fill_value : scalar, default None
            Value to use for missing values.

        See Also
        --------
        .fillna: Fill NA/NaN values using the specified method.

        r})r�r�r�r�N)r�r�r)
rYr�r�rQrRrOr�rTrX�get_indexer�_take_new_indexrUr�)rkr�r�r�rQrR�	new_index�membr��new_objr�s          �rmr�zPeriodIndexResampler._upsample�s����"�9�9��#��7�$�V�5�Z�$�P�P�
�W�W���h�h���K�K�	��y�y�������y�8���X���F��"�"�9�V�5�"�I��!�������	
��� � ��)�)ror8r9r:)rzr@rArCrEr�r�rfr�r�rIrJs@rmryryVs6���	�O�
�+��+�;�
�$%
�N%*�%*roryc� �eZdZdZed��Zy)r{�:
    Provides a resample of a groupby implementation.
    c��tSr)ryr�s rmrSz*PeriodIndexResamplerGroupby._resampler_cls�s��#�#roNrwr�rormr{r{�s����$��$ror{c�4�eZdZUded<ed��Zd�Zd�Zy)�TimedeltaIndexResamplerr5rQc��tSr)�TimedeltaIndexResamplerGroupbyr�s rmr�z/TimedeltaIndexResampler._resampler_for_grouping�s��-�-roc�L�|jj|j�Sr)rN�_get_time_delta_binsrQr�s rmr�z,TimedeltaIndexResampler._get_binner_for_time�s��� � �5�5�d�g�g�>�>roc��|S)z�
        Adjust our binner when upsampling.

        The range of a new index is allowed to be greater than original range
        so we don't need to change the length of a binner, GH 13022
        r�rks  rmrlz3TimedeltaIndexResampler._adjust_binner_for_upsample�s	���
roN)rzr@rArCrEr�r�rlr�rormr�r��s&��	��
�.��.�?�ror�c� �eZdZdZed��Zy)r�r�c��tSr)r�r�s rmrSz-TimedeltaIndexResamplerGroupby._resampler_cls�s��&�&roNrwr�rormr�r��s����'��'ror�c�@�t|fi|��}|j||��S)z8
    Create a TimeGrouper and return our resampler.
    �rY)rM�_get_resampler)rRrY�kwds�tgs    rm�
get_resamplerr��s)��
�S�	!�D�	!�B�
���S�t��,�,roc��td||d�|��}	|	j|j|��}
|
j|||	j��S)zA
    Return our appropriate resampler when grouping as well.
    )rTrir�)r�r^rir�)rMr�rRr�ri)r��ruler��fill_methodr�rY�onr^r�r��	resamplers           rm�get_resampler_for_groupingr�sT��
�	1�$�B�	1�&�	1�B��!�!�'�+�+�D�!�9�I��0�0���B�F�F�1��roc���eZdZUdZej
dzZded<														d																											d�fd�
Zddd�Z	d					dd�Z	dd	�Z
						dd
�Zdd�Zdd�Z
dd
�Z	ddd�							d�fd�Z�xZS)rMah
    Custom groupby class for time-interval grouping.

    Parameters
    ----------
    freq : pandas date offset or offset alias for identifying bin edges
    closed : closed end of interval; 'left' or 'right'
    label : interval boundary to use for labeling; 'left' or 'right'
    convention : {'start', 'end', 'e', 's'}
        If axis is PeriodIndex
    )rVrWr�rYrXrZr[rDrZNc�b��|dvrtd|�d���|dvrtd|�d���|dvrtd|�d���|�|�t|jt�s|�#|�!t	||dd�dk(rt|d	�
�}nt|�}hd�}|j}||vsd|vr|d|jd�|vr	|�d
}|�d
}n|dvr	|�d
}|�d
}n|�d}|�d}||_||_	|
|_
|�|nd|_||_||_
|	|_||_d|_|dvr||_n	t%|�|_	|
�t)|
�nd|_d	|d<t-�|�\d|||d�|��y#tt&f$r}td|�d��|�d}~wwxYw#tt&f$r}td|
�d��|�d}~wwxYw)N>N�leftrizUnsupported value z for `label`z
 for `closed`>N�e�s�end�startz for `convention`r-r^T)�	is_period>�W�ME�QE�YE�BME�BQE�BYE�-ri�r��end_dayr�r�)�epochr��	start_dayr�r�z|'origin' should be equal to 'epoch', 'start', 'start_day', 'end', 'end_day' or should be a Timestamp convertible type. Got 'z
' instead.z6'offset' should be a Timedelta convertible type. Got 'r`)rTrirUr�)r�r�r�r3rsr�	rule_code�findrVrWrYrXr�r�r�r\r�rZr�	TypeErrorrr[r�rn)rkrRrTrirVrWr�rUr�r�rYrXrZr[r\r��	end_typesr�r�r�s                   �rmrnzTimeGrouper.__init__7sF���*�/�/��1�%���E�F�F��0�0��1�&���G�H�H��=�=��1�*��=N�O�P�P�
�K����3�9�9�k�2����O��C��H�g�t�4��@��T�T�2�D��T�?�D�@�	��~�~���9������6F��	�	�#��1G�9�1T��~� ���}����+�+��>�$�F��=�#�E��>�#�F��=�"�E������
���	�(2�(>�*�C������&�����
�$���/3����F�F�!�D�K�
�'��/���	�/5�/A�)�F�+�t�D�K���v��
���A�d��$�A�&�A��%�	�*�
� �D�DJ�8�:�W���	��
���I�&�	����x�z�+���
��	�s0�%E#�6F
�#F�2F�F�
F.�F)�)F.c�l�|j|d��\}}}t|t�r%t||||j|j
|��St|t�s|dk(r~t|t�r%tjdtt���n$tjdtt���t||||j|j
|��St|t�r$t|||j|j
|��Std	t|�j �d
���)ad
        Return my resampler or raise if we have an invalid axis.

        Parameters
        ----------
        obj : Series or DataFrame
        kind : string, optional
            'period','timestamp','timedelta' are valid

        Returns
        -------
        Resampler

        Raises
        ------
        TypeError if incompatible axis

[Unrecoverable CPython 3.12 bytecode for the TimeGrouper binning methods (_get_resampler, _get_grouper, _get_time_bins, _adjust_bin_edges, _get_time_delta_bins, _get_time_period_bins, _get_period_bins, _set_grouper) plus module-level reindexing helpers and the start of _get_timestamp_range_edges. Recoverable string constants:
  "Resampling with a PeriodIndex is deprecated. Cast index to DatetimeIndex before resampling instead."
  "Resampling with kind='period' is deprecated. Use datetime paths instead."
  "Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, but got an instance of '...'"
  "axis must be a DatetimeIndex, but got an instance of ..."
  "axis must be a TimedeltaIndex, but got an instance of ..."
  "Resampling on a TimedeltaIndex requires fixed-duration `freq`, e.g. '24h' or '3D', not ..."
  "axis must be a PeriodIndex, but got an instance of ..."
  "axis 1 is not supported"
  "'obj' should be either a Series or a DataFrame"
The next block is the recoverable docstring of the module-level helper _get_timestamp_range_edges(first, last, freq, closed, origin, offset):]
    Adjust the `first` Timestamp to the preceding Timestamp that resides on
    the provided offset. Adjust the `last` Timestamp to the following
    Timestamp that resides on the provided offset. Input Timestamps that
    already reside on the offset will be adjusted depending on the type of
    offset and the `closed` parameter.

    Parameters
    ----------
    first : pd.Timestamp
        The beginning Timestamp of the range to be adjusted.
    last : pd.Timestamp
        The ending Timestamp of the range to be adjusted.
    freq : pd.DateOffset
        The dateoffset to which the Timestamps will be adjusted.
    closed : {'right', 'left'}, default "left"
        Which side of bin interval is closed.
    origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin must
        match the timezone of the index.
        If a timestamp is not used, these values are also supported:

        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
    offset : pd.Timedelta, default is None
        An offset timedelta added to the origin.

    Returns
    -------
    A tuple of length 2, containing the adjusted pd.Timestamp objects.
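The closed/origin/offset parameters documented above are exposed on the public resample API, so their effect on bin edges can be observed without touching this internal helper. A minimal sketch follows; the sample timestamps and frequencies are invented for illustration, and origin/offset are the keywords accepted by Series.resample in pandas 1.1+:

import pandas as pd

# Invented sample: points every 17 minutes starting just before midnight.
idx = pd.date_range("2000-10-01 23:30:00", periods=12, freq="17min")
ts = pd.Series(range(12), index=idx)

# Default origin='start_day': bins are anchored at midnight of the first day.
print(ts.resample("7min").sum())

# origin='epoch' anchors bins at 1970-01-01, which usually shifts the edges.
print(ts.resample("7min", origin="epoch").sum())

# offset nudges every bin edge by a fixed timedelta on top of the origin.
print(ts.resample("7min", origin="epoch", offset="2min").sum())

# closed='right' assigns a value sitting exactly on an edge to the bin that
# ends (rather than starts) there; label='right' labels bins by their right edge.
print(ts.resample("7min", closed="right", label="right").sum())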
[Unrecoverable bytecode for the body of _get_timestamp_range_edges; recoverable string constant: "The origin must have the same timezone as the index." The next block is the recoverable docstring of _get_period_range_edges(first, last, freq, closed, origin, offset):]
    Adjust the provided `first` and `last` Periods to the respective Period of
    the given offset that encompasses them.

    Parameters
    ----------
    first : pd.Period
        The beginning Period of the range to be adjusted.
    last : pd.Period
        The ending Period of the range to be adjusted.
    freq : pd.DateOffset
        The freq to which the Periods will be adjusted.
    closed : {'right', 'left'}, default "left"
        Which side of bin interval is closed.
    origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day'
        The timestamp on which to adjust the grouping. The timezone of origin must
        match the timezone of the index.

        If a timestamp is not used, these values are also supported:

        - 'epoch': `origin` is 1970-01-01
        - 'start': `origin` is the first value of the timeseries
        - 'start_day': `origin` is the first day at midnight of the timeseries
    offset : pd.Timedelta, default is None
        An offset timedelta added to the origin.

    Returns
    -------
    A tuple of length 2, containing the adjusted pd.Period objects.
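Because resampling directly on a PeriodIndex is deprecated (see the warning text recovered earlier in this module), a hedged sketch of the suggested workaround is to move to a DatetimeIndex first and resample there. The sample data and frequencies below are invented:

import pandas as pd

# Invented monthly data on a PeriodIndex.
pidx = pd.period_range("2023-01", periods=6, freq="M")
s = pd.Series(range(6), index=pidx)

# Convert to timestamps, resample on the DatetimeIndex, then optionally
# go back to period labels.
resampled = s.to_timestamp().resample("2MS").sum()
print(resampled)
print(resampled.to_period("M"))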
[Unrecoverable bytecode for the body of _get_period_range_edges, the NaT-bin insertion helper, the anchored-date adjustment helper (_adjust_dates_anchored), and parts of the module-level asfreq helper. Recoverable string constant: "'first' and 'last' must be instances of type Period". The next block is the recoverable docstring of the module-level asfreq helper:]
    Utility frequency conversion method for Series/DataFrame.

    See :meth:`pandas.NDFrame.asfreq` for full documentation.
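A small usage sketch of the public asfreq behaviour this helper backs; the sample data are invented and method='ffill' is one of the documented fill options:

import pandas as pd

# Invented, irregular daily observations.
idx = pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-05"])
s = pd.Series([1.0, 2.0, 3.0], index=idx)

# asfreq reindexes onto a regular grid; missing slots become NaN unless filled.
print(s.asfreq("D"))
print(s.asfreq("D", method="ffill"))

# Resampler.asfreq reaches the same kind of result through resample().
print(s.resample("D").asfreq())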
[Unrecoverable bytecode and constants for the remainder of asfreq ("'method' argument is not supported", "_period_dtype_code") and the start of _asfreq_compat. The next block is the recoverable docstring of _asfreq_compat(index, freq):]
    Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex.

    Parameters
    ----------
    index : PeriodIndex, DatetimeIndex, or TimedeltaIndex
    freq : DateOffset

    Returns
    -------
    same type as index
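A rough re-creation of the behaviour the docstring describes, for illustration only; the helper name empty_index_with_freq is hypothetical and the real pandas internals differ in details:

import pandas as pd
from pandas import DatetimeIndex, PeriodIndex, TimedeltaIndex

def empty_index_with_freq(index, freq):
    # Hypothetical helper: return an empty index of the same type as `index`
    # but carrying the requested freq, as described above.
    if len(index) != 0:
        raise ValueError("only meant for empty indexes")
    if isinstance(index, PeriodIndex):
        return index.asfreq(freq=freq)
    if isinstance(index, DatetimeIndex):
        return DatetimeIndex([], dtype=index.dtype, freq=freq)
    if isinstance(index, TimedeltaIndex):
        return TimedeltaIndex([], dtype=index.dtype, freq=freq)
    raise TypeError(f"unsupported index type: {type(index)}")

print(empty_index_with_freq(pd.DatetimeIndex([]), "D"))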
[Unrecoverable bytecode; recoverable string constant: "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex". The next block is the recoverable docstring of maybe_warn_args_and_kwargs(cls, kernel, args, kwargs):]
    Warn for deprecation of args and kwargs in resample functions.

    Parameters
    ----------
    cls : type
        Class to warn about.
    kernel : str
        Operation name.
    args : tuple or None
        args passed by user. Will be None if and only if kernel does not have args.
    kwargs : dict or None
        kwargs passed by user. Will be None if and only if kernel does not have kwargs.
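An illustrative analogue of such a warning helper; the function name warn_unused_args_kwargs and the exact stacklevel are assumptions, and only the message text mirrors the recoverable constants that follow:

import warnings

def warn_unused_args_kwargs(cls, kernel, args, kwargs):
    # Illustrative analogue of the deprecation warning described above;
    # the real pandas helper differs in naming and stack-level handling.
    has_args = args is not None and len(args) > 0
    has_kwargs = kwargs is not None and len(kwargs) > 0
    if has_args and has_kwargs:
        extra = "args and kwargs"
    elif has_args:
        extra = "args"
    elif has_kwargs:
        extra = "kwargs"
    else:
        return
    warnings.warn(
        f"Passing additional {extra} to {cls.__name__}.{kernel} has no impact "
        "on the result and is deprecated. This will raise a TypeError in a "
        "future version of pandas.",
        category=FutureWarning,
        stacklevel=3,
    )

class DemoResampler:  # stand-in class, only to show the warning text
    pass

warn_unused_args_kwargs(DemoResampler, "sum", args=(1,), kwargs=None)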
[Unrecoverable bytecode and module trailer (constants, type annotations, and the import table). Recoverable string constants:
  "args and kwargs"
  "Passing additional ... to ... has no impact on the result and is deprecated. This will raise a TypeError in a future version of pandas."
  "DataFrameGroupBy.apply operated on the grouping columns"
  "DataFrameGroupBy"
  "resample"]

Sindbad File Manager Version 1.0, Coded By Sindbad EG ~ The Terrorists